From 6b733e9d0ec5acbcc47cc88ff88ffb328e829db5 Mon Sep 17 00:00:00 2001 From: Jamie Pryde Date: Thu, 23 Jan 2025 15:21:27 +0000 Subject: [PATCH] erasure-code: Rewrite the encode_chunks interface for all plugins We have changed the encode_chunks interface to take an in map and an out map. The in map contains the data shards to be encoded. The out map contains the empty buffers that the plugin will write the parity data to. The old non-optimized EC path has been updated to call the new encode_chunks interface from erasurecode.cc The new EC optimizations path calls the encode_chunks interface from ECUtils. Signed-off-by: Jamie Pryde Signed-off-by: Alex Ainscow --- src/erasure-code/ErasureCode.cc | 217 ++++++++- src/erasure-code/ErasureCode.h | 45 +- src/erasure-code/ErasureCodeInterface.h | 120 ++++- src/erasure-code/clay/ErasureCodeClay.cc | 49 ++ src/erasure-code/clay/ErasureCodeClay.h | 22 + src/erasure-code/isa/ErasureCodeIsa.cc | 102 +++++ src/erasure-code/isa/ErasureCodeIsa.h | 8 + .../jerasure/ErasureCodeJerasure.cc | 104 +++++ .../jerasure/ErasureCodeJerasure.h | 9 +- src/erasure-code/lrc/ErasureCodeLrc.cc | 314 ++++++++++++- src/erasure-code/lrc/ErasureCodeLrc.h | 20 +- src/erasure-code/shec/ErasureCodeShec.cc | 169 ++++++- src/erasure-code/shec/ErasureCodeShec.h | 35 +- src/osd/ECBackend.cc | 12 +- src/osd/ECBackend.h | 8 + src/osd/ECBackendL.h | 2 + src/osd/ECCommon.cc | 19 +- src/osd/ECCommonL.cc | 4 + src/osd/ECTypes.h | 47 ++ src/osd/ECUtil.cc | 7 + src/osd/ECUtil.h | 52 +-- src/osd/ECUtilL.cc | 8 + src/osd/ECUtilL.h | 8 +- src/osd/osd_types.h | 2 + src/test/erasure-code/ErasureCodeExample.h | 157 ++++++- src/test/erasure-code/TestErasureCode.cc | 40 +- src/test/erasure-code/TestErasureCodeClay.cc | 5 + .../erasure-code/TestErasureCodeExample.cc | 163 ++++--- src/test/erasure-code/TestErasureCodeIsa.cc | 334 +++++++------- .../erasure-code/TestErasureCodeJerasure.cc | 131 +++--- src/test/erasure-code/TestErasureCodeLrc.cc | 424 +++++++++--------- .../erasure-code/TestErasureCodePlugins.cc | 151 +++---- src/test/erasure-code/TestErasureCodeShec.cc | 122 ++--- .../erasure-code/TestErasureCodeShec_all.cc | 22 +- .../TestErasureCodeShec_arguments.cc | 22 +- .../TestErasureCodeShec_thread.cc | 2 + .../ceph_erasure_code_benchmark.cc | 79 ++-- .../ceph_erasure_code_benchmark.h | 6 +- .../ceph_erasure_code_non_regression.cc | 62 +-- .../erasure-code/ceph-erasure-code-tool.cc | 6 +- 40 files changed, 2278 insertions(+), 831 deletions(-) diff --git a/src/erasure-code/ErasureCode.cc b/src/erasure-code/ErasureCode.cc index 6640acdbc71..7184f1dd714 100644 --- a/src/erasure-code/ErasureCode.cc +++ b/src/erasure-code/ErasureCode.cc @@ -119,11 +119,18 @@ int ErasureCode::sanity_check_k_m(int k, int m, ostream *ss) return 0; } -int ErasureCode::chunk_index(unsigned int i) const +shard_id_t ErasureCode::chunk_index(raw_shard_id_t i) const { - return chunk_mapping.size() > i ? chunk_mapping[i] : i; + return chunk_mapping.size() > uint64_t(i) ? chunk_mapping[static_cast(i)] : shard_id_t(int8_t(i)); } +[[deprecated]] +unsigned int ErasureCode::chunk_index(unsigned int i) const +{ + return static_cast(chunk_mapping.size() > uint64_t(i) ? 
chunk_mapping[i] : shard_id_t(i)); +} + +[[deprecated]] int ErasureCode::_minimum_to_decode(const set &want_to_read, const set &available_chunks, set *minimum) @@ -143,6 +150,26 @@ int ErasureCode::_minimum_to_decode(const set &want_to_read, return 0; } +int ErasureCode::_minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available_chunks, + shard_id_set *minimum) +{ + if (available_chunks.includes(want_to_read)) { + *minimum = want_to_read; + } else { + unsigned int k = get_data_chunk_count(); + if (available_chunks.size() < (unsigned)k) + return -EIO; + shard_id_set::const_iterator i; + unsigned j; + for (i = available_chunks.begin(), j = 0; j < (unsigned)k; ++i, j++) + minimum->insert(*i); + } + return 0; +} + +IGNORE_DEPRECATED +[[deprecated]] int ErasureCode::minimum_to_decode(const set &want_to_read, const set &available_chunks, map>> *minimum) @@ -159,7 +186,28 @@ int ErasureCode::minimum_to_decode(const set &want_to_read, } return 0; } +END_IGNORE_DEPRECATED + +int ErasureCode::minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available_chunks, + shard_id_set &minimum_set, + shard_id_map>> *minimum_sub_chunks) +{ + int r = _minimum_to_decode(want_to_read, available_chunks, &minimum_set); + if (minimum_sub_chunks == nullptr) return r; + if (r != 0) { + return r; + } + vector> default_subchunks; + default_subchunks.push_back(make_pair(0, get_sub_chunk_count())); + for (auto &&id : minimum_set) { + minimum_sub_chunks->emplace(id, default_subchunks); + } + return 0; +} +IGNORE_DEPRECATED +[[deprecated]] int ErasureCode::minimum_to_decode_with_cost(const set &want_to_read, const map &available, set *minimum) @@ -171,7 +219,22 @@ int ErasureCode::minimum_to_decode_with_cost(const set &want_to_read, available_chunks.insert(i->first); return _minimum_to_decode(want_to_read, available_chunks, minimum); } +END_IGNORE_DEPRECATED +int ErasureCode::minimum_to_decode_with_cost(const shard_id_set &want_to_read, + const shard_id_map &available, + shard_id_set *minimum) +{ + shard_id_set available_chunks; + for (shard_id_map::const_iterator i = available.begin(); + i != available.end(); + ++i) + available_chunks.insert(i->first); + return _minimum_to_decode(want_to_read, available_chunks, minimum); +} + +IGNORE_DEPRECATED +[[deprecated]] int ErasureCode::encode_prepare(const bufferlist &raw, map &encoded) const { @@ -208,7 +271,47 @@ int ErasureCode::encode_prepare(const bufferlist &raw, return 0; } +END_IGNORE_DEPRECATED + +int ErasureCode::encode_prepare(const bufferlist &raw, + shard_id_map &encoded) const +{ + unsigned int k = get_data_chunk_count(); + unsigned int m = get_chunk_count() - k; + unsigned blocksize = get_chunk_size(raw.length()); + unsigned padded_chunks = k - raw.length() / blocksize; + bufferlist prepared = raw; + + for (raw_shard_id_t i; i < k - padded_chunks; ++i) { + bufferlist &chunk = encoded[chunk_index(i)]; + chunk.substr_of(prepared, (int)i * blocksize, blocksize); + chunk.rebuild_aligned_size_and_memory(blocksize, SIMD_ALIGN); + ceph_assert(chunk.is_contiguous()); + } + if (padded_chunks) { + unsigned remainder = raw.length() - (k - padded_chunks) * blocksize; + bufferptr buf(buffer::create_aligned(blocksize, SIMD_ALIGN)); + + raw.begin((k - padded_chunks) * blocksize).copy(remainder, buf.c_str()); + buf.zero(remainder, blocksize - remainder); + encoded[chunk_index(raw_shard_id_t(k - padded_chunks))].push_back(std::move(buf)); + + for (raw_shard_id_t i(k - padded_chunks + 1); i < k; ++i) { + bufferptr 
buf(buffer::create_aligned(blocksize, SIMD_ALIGN)); + buf.zero(); + encoded[chunk_index(i)].push_back(std::move(buf)); + } + } + for (raw_shard_id_t i(k); i < k + m; ++i) { + bufferlist &chunk = encoded[chunk_index(i)]; + chunk.push_back(buffer::create_aligned(blocksize, SIMD_ALIGN)); + } + + return 0; +} +IGNORE_DEPRECATED +[[deprecated]] int ErasureCode::encode(const set &want_to_encode, const bufferlist &in, map *encoded) @@ -226,7 +329,46 @@ int ErasureCode::encode(const set &want_to_encode, } return 0; } +END_IGNORE_DEPRECATED + +int ErasureCode::encode(const shard_id_set &want_to_encode, + const bufferlist &in, + shard_id_map *encoded) +{ + unsigned int k = get_data_chunk_count(); + unsigned int m = get_chunk_count() - k; + + if (!encoded || !encoded->empty()){ + return -EINVAL; + } + int err = encode_prepare(in, *encoded); + if (err) + return err; + + shard_id_map in_shards(get_chunk_count()); + shard_id_map out_shards(get_chunk_count()); + + for (raw_shard_id_t raw_shard; raw_shard < get_chunk_count(); ++raw_shard) { + shard_id_t shard = chunk_index(raw_shard); + if (!encoded->contains(shard)) continue; + + auto bp = encoded->at(shard).begin().get_current_ptr(); + ceph_assert(bp.length() == encoded->at(shard).length()); + + if (raw_shard < k) in_shards[shard] = bp; + else out_shards[shard] = bp; + } + + encode_chunks(in_shards, out_shards); + for (shard_id_t i; i < k + m; ++i) { + if (want_to_encode.count(i) == 0) + encoded->erase(i);} + + return 0; +} +IGNORE_DEPRECATED +[[deprecated]] int ErasureCode::_decode(const set &want_to_read, const map &chunks, map *decoded) @@ -264,7 +406,63 @@ int ErasureCode::_decode(const set &want_to_read, } return decode_chunks(want_to_read, chunks, decoded); } +END_IGNORE_DEPRECATED +int ErasureCode::_decode(const shard_id_set &want_to_read, + const shard_id_map &chunks, + shard_id_map *decoded) +{ + shard_id_set have; + + if (!decoded || !decoded->empty()){ + return -EINVAL; + } + if (!want_to_read.empty() && chunks.empty()) { + return -1; + } + + for (auto &&[shard, _] : chunks) { + have.insert(shard); + } + if (have.includes(want_to_read)) { + for (auto &&shard : want_to_read) { + (*decoded)[shard] = chunks.at(shard); + } + return 0; + } + unsigned int k = get_data_chunk_count(); + unsigned int m = get_chunk_count() - k; + unsigned blocksize = (*chunks.begin()).second.length(); + shard_id_set erasures; + for (shard_id_t i; i < k + m; ++i) { + if (!chunks.contains(i)) { + bufferlist tmp; + bufferptr ptr(buffer::create_aligned(blocksize, SIMD_ALIGN)); + tmp.push_back(ptr); + tmp.claim_append((*decoded)[i]); + (*decoded)[i].swap(tmp); + erasures.insert(i); + } else { + (*decoded)[i] = chunks.find(i)->second; + (*decoded)[i].rebuild_aligned(SIMD_ALIGN); + } + bufferlist &bl = (*decoded)[i]; + if (bl.length() != bl.begin().get_current_ptr().length()) { + bl.rebuild(); + } + } + shard_id_map in(get_chunk_count()); + shard_id_map out(get_chunk_count()); + for (auto&& [shard, list] : *decoded) { + auto bp = list.begin().get_current_ptr(); + ceph_assert(bp.length() == list.length()); + if (erasures.find(shard) == erasures.end()) in[shard] = bp; + else out[shard] = bp; + } + return decode_chunks(want_to_read, in, out); +} + +[[deprecated]] int ErasureCode::decode(const set &want_to_read, const map &chunks, map *decoded, int chunk_size) @@ -272,13 +470,20 @@ int ErasureCode::decode(const set &want_to_read, return _decode(want_to_read, chunks, decoded); } +int ErasureCode::decode(const shard_id_set &want_to_read, + const shard_id_map &chunks, + shard_id_map 
*decoded, int chunk_size) +{ + return _decode(want_to_read, chunks, decoded); +} + int ErasureCode::parse(const ErasureCodeProfile &profile, ostream *ss) { return to_mapping(profile, ss); } -const vector &ErasureCode::get_chunk_mapping() const { +const vector &ErasureCode::get_chunk_mapping() const { return chunk_mapping; } @@ -291,7 +496,7 @@ int ErasureCode::to_mapping(const ErasureCodeProfile &profile, vector coding_chunk_mapping; for(std::string::iterator it = mapping.begin(); it != mapping.end(); ++it) { if (*it == 'D') - chunk_mapping.push_back(position); + chunk_mapping.push_back(shard_id_t(position)); else coding_chunk_mapping.push_back(position); position++; @@ -353,6 +558,8 @@ int ErasureCode::to_string(const std::string &name, return 0; } +IGNORE_DEPRECATED +[[deprecated]] int ErasureCode::decode_concat(const set& want_to_read, const map &chunks, bufferlist *decoded) @@ -375,6 +582,7 @@ int ErasureCode::decode_concat(const set& want_to_read, return r; } +[[deprecated]] int ErasureCode::decode_concat(const map &chunks, bufferlist *decoded) { @@ -384,4 +592,5 @@ int ErasureCode::decode_concat(const map &chunks, } return decode_concat(want_to_read, chunks, decoded); } +END_IGNORE_DEPRECATED } diff --git a/src/erasure-code/ErasureCode.h b/src/erasure-code/ErasureCode.h index 469bcf6019b..3733649a620 100644 --- a/src/erasure-code/ErasureCode.h +++ b/src/erasure-code/ErasureCode.h @@ -31,7 +31,7 @@ class ErasureCode : public ErasureCodeInterface { public: static const unsigned SIMD_ALIGN; - std::vector chunk_mapping; + std::vector chunk_mapping; ErasureCodeProfile _profile; // for CRUSH rule @@ -64,37 +64,72 @@ class ErasureCode : public ErasureCodeInterface { return 1; } + [[deprecated]] virtual int _minimum_to_decode(const std::set &want_to_read, const std::set &available_chunks, std::set *minimum); + virtual int _minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available_chunks, + shard_id_set *minimum); + + [[deprecated]] int minimum_to_decode(const std::set &want_to_read, const std::set &available, std::map>> *minimum) override; + int minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available, + shard_id_set &minimum_set, + mini_flat_map>> *minimum_sub_chunks) override; + + [[deprecated]] int minimum_to_decode_with_cost(const std::set &want_to_read, const std::map &available, std::set *minimum) override; + int minimum_to_decode_with_cost(const shard_id_set &want_to_read, + const mini_flat_map &available, + shard_id_set *minimum) override; + + int encode_prepare(const bufferlist &raw, + mini_flat_map &encoded) const; + + [[deprecated]] int encode_prepare(const bufferlist &raw, std::map &encoded) const; + int encode(const shard_id_set &want_to_encode, + const bufferlist &in, + mini_flat_map *encoded) override; + + [[deprecated]] int encode(const std::set &want_to_encode, const bufferlist &in, std::map *encoded) override; + [[deprecated]] int decode(const std::set &want_to_read, const std::map &chunks, std::map *decoded, int chunk_size) override; + int decode(const shard_id_set &want_to_read, + const mini_flat_map &chunks, + mini_flat_map *decoded, int chunk_size) override; + + [[deprecated]] virtual int _decode(const std::set &want_to_read, const std::map &chunks, std::map *decoded); - const std::vector &get_chunk_mapping() const override; + virtual int _decode(const shard_id_set &want_to_read, + const mini_flat_map &chunks, + mini_flat_map *decoded); + + const std::vector &get_chunk_mapping() const override; int to_mapping(const 
ErasureCodeProfile &profile, std::ostream *ss); @@ -116,9 +151,11 @@ class ErasureCode : public ErasureCodeInterface { const std::string &default_value, std::ostream *ss); + [[deprecated]] int decode_concat(const std::set &want_to_read, const std::map &chunks, bufferlist *decoded) override; + [[deprecated]] int decode_concat(const std::map &chunks, bufferlist *decoded) override; @@ -137,7 +174,9 @@ class ErasureCode : public ErasureCodeInterface { int parse(const ErasureCodeProfile &profile, std::ostream *ss); private: - int chunk_index(unsigned int i) const; + [[deprecated]] + unsigned int chunk_index(unsigned int i) const; + shard_id_t chunk_index(raw_shard_id_t i) const; }; } diff --git a/src/erasure-code/ErasureCodeInterface.h b/src/erasure-code/ErasureCodeInterface.h index 2be9387f2d1..8e2f5cdd1e5 100644 --- a/src/erasure-code/ErasureCodeInterface.h +++ b/src/erasure-code/ErasureCodeInterface.h @@ -147,6 +147,18 @@ #include #include #include "include/buffer_fwd.h" +#include "osd/osd_types.h" + +#define IGNORE_DEPRECATED \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\" ") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") + +#define END_IGNORE_DEPRECATED \ + _Pragma("clang pop") \ + _Pragma("GCC pop") + class CrushWrapper; @@ -294,6 +306,14 @@ namespace ceph { * subchunk index offsets, count. * @return **0** on success or a negative errno on error. */ + virtual int minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available, + shard_id_set &minimum_set, + mini_flat_map>> + *minimum_sub_chunks) = 0; + + // Interface for legacy EC. + [[deprecated]] virtual int minimum_to_decode(const std::set &want_to_read, const std::set &available, std::map>> @@ -323,6 +343,11 @@ namespace ceph { * @param [out] minimum chunk indexes to retrieve * @return **0** on success or a negative errno on error. */ + virtual int minimum_to_decode_with_cost(const shard_id_set &want_to_read, + const shard_id_map &available, + shard_id_set *minimum) = 0; + + [[deprecated]] virtual int minimum_to_decode_with_cost(const std::set &want_to_read, const std::map &available, std::set *minimum) = 0; @@ -337,6 +362,10 @@ namespace ceph { virtual size_t get_minimum_granularity() = 0; /** + * Note: The encode function is used for the older EC code path + * that is used when EC optimizations are turned off. EC optimizations + * are turned off for new pools by default. + * * Encode the content of **in** and store the result in * **encoded**. All buffers pointed to by **encoded** have the * same size. The **encoded** map contains at least all chunk @@ -371,14 +400,55 @@ namespace ceph { * @param [out] encoded map chunk indexes to chunk data * @return **0** on success or a negative errno on error. */ - virtual int encode(const std::set &want_to_encode, + virtual int encode(const shard_id_set &want_to_encode, const bufferlist &in, - std::map *encoded) = 0; - + shard_id_map *encoded) = 0; + [[deprecated]] + virtual int encode(const std::set &want_to_encode, + const bufferlist &in, + std::map *encoded) = 0; + [[deprecated]] virtual int encode_chunks(const std::set &want_to_encode, std::map *encoded) = 0; + /** + * Note: The encode_chunks function is used by the older EC code path + * that is used when EC optimizations are turned off. It is also used + * when EC optimizations are turned on. + * + * Encode the content of **in** and store the result in + * **out**. 
All buffers pointed to by **in** and **out** have the + * same size. + * + * The data chunks to be encoded are provided in the in map, these buffers + * are considered to be immutable (neither the bufferptr or the contents + * of the buffer may be changed). Some of these bufferptrs may be a special + * bufferptr representing a buffer of zeros. There is no way to represent + * a buffer for a chunk that consists of a mixture of data and zeros, + * the caller is expected to make multiple calls to encode_chunks using smaller + * buffers if this optimzation is worthwhile. The bufferptrs are expected to + * have suitable alignment (page alignment) and are a single contiguous + * range of memory. The caller is likely to have a bufferlist per chunk + * and may either need to make multiple calls to encode_chunks or use + * rebuild_and_align to create a single contiguous buffer for each chunk. + * + * The coding parity chunk bufferptrs are allocated by the caller and + * populated in the out map. These bufferptrs are expected to be written to + * by the erasure code plugin. Again the bufferptrs are expected to have + * suitable alignment and are a single contiguous range of memory. + * The erasure code plugin may replace one or more of these bufferptrs + * with a special bufferptr representing a buffer of zeros. + * + * Returns 0 on success. + * + * @param [in] in map of data shards to be encoded + * @param [out] out map of empty buffers for parity to be written to + * @return **0** on success or a negative errno on error. + */ + virtual int encode_chunks(const shard_id_map &in, + shard_id_map &out) = 0; + /** * Calculate the delta between the old_data and new_data buffers using xor, * (or plugin-specific implementation) and returns the result in the @@ -426,10 +496,13 @@ namespace ceph { * @param [in] new_data second buffer to xor * @param [out] delta buffer containing the delta of old_data and new_data */ - virtual void apply_delta(const std::map &in, - std::map &out) = 0; + virtual void apply_delta(const shard_id_map &in, + shard_id_map &out) = 0; /** + * N.B This function is not used when EC optimizations are + * turned on for the pool. + * * Decode the **chunks** and store at least **want_to_read** * chunks in **decoded**. * @@ -463,10 +536,43 @@ namespace ceph { * @param [in] chunk_size chunk size * @return **0** on success or a negative errno on error. */ + virtual int decode(const shard_id_set &want_to_read, + const shard_id_map &chunks, + shard_id_map *decoded, int chunk_size) = 0; + [[deprecated]] virtual int decode(const std::set &want_to_read, const std::map &chunks, std::map *decoded, int chunk_size) = 0; + /** + * Decode the **in** map and store at least **want_to_read** + * shards in the **out** map. + * + * There must be enough shards in the **in** map( as returned by + * **minimum_to_decode** or **minimum_to_decode_with_cost** ) to + * perform a successful decoding of all shards listed in + * **want_to_read**. + * + * All buffers pointed to by **in** must have the same size. + * **out** must contain empty buffers that are the same size as the + * **in*** buffers. + * + * On success, the **out** map may contain more shards than + * required by **want_to_read** and they can safely be used by the + * caller. + * + * Returns 0 on success. 
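+ *
+ * A minimal caller-side sketch (illustrative only; it assumes shard_id_map
+ * is keyed by shard_id_t and holds bufferptr values, as elsewhere in this
+ * change, and names such as available, chunk_size and ec_impl are
+ * hypothetical):
+ *
+ *   shard_id_map<bufferptr> in(ec_impl->get_chunk_count());
+ *   shard_id_map<bufferptr> out(ec_impl->get_chunk_count());
+ *   for (auto &&[shard, bl] : available)          // shards we already hold
+ *     in[shard] = bl.begin().get_current_ptr();   // single contiguous buffer
+ *   for (auto &&shard : want_to_read)             // shards to reconstruct
+ *     if (!in.contains(shard))
+ *       out[shard] = buffer::create_aligned(chunk_size, SIMD_ALIGN);
+ *   int r = ec_impl->decode_chunks(want_to_read, in, out);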
+ * + * @param [in] want_to_read shard indexes to be decoded + * @param [in] in map of available shard indexes to shard data + * @param [out] out map of shard indexes that nede to be decoded to empty buffers + * @return **0** on success or a negative errno on error. + */ + virtual int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) = 0; + + [[deprecated]] virtual int decode_chunks(const std::set &want_to_read, const std::map &chunks, std::map *decoded) = 0; @@ -504,7 +610,7 @@ namespace ceph { * * @return vector list of indices of chunks to be remapped */ - virtual const std::vector &get_chunk_mapping() const = 0; + virtual const std::vector &get_chunk_mapping() const = 0; /** * Decode the first **get_data_chunk_count()** **chunks** and @@ -520,9 +626,11 @@ namespace ceph { * will be concatenated into `decoded` in index order * @return **0** on success or a negative errno on error. */ + [[deprecated]] virtual int decode_concat(const std::set& want_to_read, const std::map &chunks, bufferlist *decoded) = 0; + [[deprecated]] virtual int decode_concat(const std::map &chunks, bufferlist *decoded) = 0; diff --git a/src/erasure-code/clay/ErasureCodeClay.cc b/src/erasure-code/clay/ErasureCodeClay.cc index 265756352c1..0529f486d9c 100644 --- a/src/erasure-code/clay/ErasureCodeClay.cc +++ b/src/erasure-code/clay/ErasureCodeClay.cc @@ -35,6 +35,11 @@ #define LARGEST_VECTOR_WORDSIZE 16 #define talloc(type, num) (type *) malloc(sizeof(type)*(num)) +/* The new EC API work for Clay requires significant testing. We ignore all + * deprecated function use in this file until that refactor is done. + */ +IGNORE_DEPRECATED + using namespace std; using namespace ceph; static ostream& _prefix(std::ostream* _dout) @@ -100,6 +105,7 @@ size_t ErasureCodeClay::get_minimum_granularity() return mds.erasure_code->get_minimum_granularity(); } +[[deprecated]] int ErasureCodeClay::minimum_to_decode(const set &want_to_read, const set &available, map>> *minimum) @@ -111,6 +117,7 @@ int ErasureCodeClay::minimum_to_decode(const set &want_to_read, } } +[[deprecated]] int ErasureCodeClay::decode(const set &want_to_read, const map &chunks, map *decoded, int chunk_size) @@ -161,6 +168,46 @@ int ErasureCodeClay::encode_chunks(const set &want_to_encode, return res; } +#if 0 \ +/* This code was partially tested, so keeping code, but we need more + * refactoring and testing before it is ready for production. + */ +int ErasureCodeClay::encode_chunks(const std::map &in, + std::map &out) +{ + map chunks; + set parity_chunks; + unsigned int size = 0; + auto& nonconst_in = const_cast&>(in); + + for (auto &&[shard, ptr] : nonconst_in) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[shard].append(nonconst_in[shard]); + } + + for (auto &&[shard, ptr] : out) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[shard+nu].append(out[shard]); + parity_chunks.insert(shard+nu); + } + + for (int i = k; i < k + nu; i++) { + bufferptr buf(buffer::create_aligned(size, SIMD_ALIGN)); + buf.zero(); + chunks[i].push_back(std::move(buf)); + } + + int res = decode_layered(parity_chunks, &chunks); + for (int i = k ; i < k + nu; i++) { + // need to clean some of the intermediate chunks here!! 
+ chunks[i].clear(); + } + return res; +} +#endif + int ErasureCodeClay::decode_chunks(const set &want_to_read, const map &chunks, map *decoded) @@ -900,3 +947,5 @@ void ErasureCodeClay::get_plane_vector(int z, int* z_vec) z = (z - z_vec[t-1-i]) / q; } } + +END_IGNORE_DEPRECATED diff --git a/src/erasure-code/clay/ErasureCodeClay.h b/src/erasure-code/clay/ErasureCodeClay.h index 500ff2d5eff..0391f614b76 100644 --- a/src/erasure-code/clay/ErasureCodeClay.h +++ b/src/erasure-code/clay/ErasureCodeClay.h @@ -74,21 +74,43 @@ public: size_t get_minimum_granularity() override; + using ErasureCode::minimum_to_decode; int minimum_to_decode(const std::set &want_to_read, const std::set &available, std::map>> *minimum) override; + using ErasureCode::decode; int decode(const std::set &want_to_read, const std::map &chunks, std::map *decoded, int chunk_size) override; + [[deprecated]] int encode_chunks(const std::set &want_to_encode, std::map *encoded) override; + // Stub for new encode chunks interface. Can be deleted once new EC is + // supported for all plugins. + int encode_chunks(const shard_id_map &in, + shard_id_map &out) override + { + ceph_abort_msg("Not implemented for this plugin"); + } + + [[deprecated]] int decode_chunks(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override; + + // Stub for new encode chunks interface. Can be deleted once new EC is + // supported for all plugins. + virtual int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) + { + ceph_abort_msg("Not implemented for this plugin"); + } + int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override; int is_repair(const std::set &want_to_read, diff --git a/src/erasure-code/isa/ErasureCodeIsa.cc b/src/erasure-code/isa/ErasureCodeIsa.cc index b6a02a3d66e..78c61fc50b3 100644 --- a/src/erasure-code/isa/ErasureCodeIsa.cc +++ b/src/erasure-code/isa/ErasureCodeIsa.cc @@ -114,6 +114,108 @@ int ErasureCodeIsa::decode_chunks(const set &want_to_read, return isa_decode(erasures, data, coding, blocksize); } +int ErasureCodeIsa::encode_chunks(const shard_id_map &in, + shard_id_map &out) +{ + char *chunks[k + m]; //TODO don't use variable length arrays + memset(chunks, 0, sizeof(char*) * (k + m)); + uint64_t size = 0; + + for (auto &&[shard, ptr] : in) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[static_cast(shard)] = const_cast(ptr.c_str()); + } + + for (auto &&[shard, ptr] : out) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[static_cast(shard)] = ptr.c_str(); + } + + char *zeros = nullptr; + + for (shard_id_t i; i < k + m; ++i) { + if (in.contains(i) || out.contains(i)) continue; + + if (zeros == nullptr) { + zeros = (char*)malloc(size); + memset(zeros, 0, size); + } + + chunks[static_cast(i)] = zeros; + } + + isa_encode(&chunks[0], &chunks[k], size); + + if (zeros != nullptr) free(zeros); + + return 0; +} + +int ErasureCodeIsa::decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) +{ + unsigned int size = 0; + shard_id_set erasures_set; + shard_id_set to_free; + erasures_set.insert_range(shard_id_t(0), k + m); + int erasures[k + m + 1]; + int erasures_count = 0; + char *data[k]; + char *coding[m]; + memset(data, 0, sizeof(char*) * k); + memset(coding, 0, sizeof(char*) * m); + + for (auto &&[shard, ptr] : in) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + if (shard < k) { + data[static_cast(shard)] = 
const_cast(ptr.c_str()); + } + else { + coding[static_cast(shard) - k] = const_cast(ptr.c_str()); + } + erasures_set.erase(shard); + } + + for (auto &&[shard, ptr] : out) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + if (shard < k) { + data[static_cast(shard)] = const_cast(ptr.c_str()); + } + else { + coding[static_cast(shard) - k] = const_cast(ptr.c_str()); + } + } + + for (int i = 0; i < k + m; i++) { + char **buf = i < k ? &data[i] : &coding[i - k]; + if (*buf == nullptr) { + *buf = (char *)malloc(size); + to_free.insert(shard_id_t(i)); + } + } + + for (auto && shard : erasures_set) { + erasures[erasures_count++] = static_cast(shard); + } + + + erasures[erasures_count] = -1; + ceph_assert(erasures_count > 0); + int r = isa_decode(erasures, data, coding, size); + for (auto & shard : to_free) { + int i = static_cast(shard); + char **buf = i < k ? &data[i] : &coding[i - k]; + free(*buf); + *buf = nullptr; + } + return r; +} + // ----------------------------------------------------------------------------- void diff --git a/src/erasure-code/isa/ErasureCodeIsa.h b/src/erasure-code/isa/ErasureCodeIsa.h index 3ffc1845841..e302eec0bc1 100644 --- a/src/erasure-code/isa/ErasureCodeIsa.h +++ b/src/erasure-code/isa/ErasureCodeIsa.h @@ -88,12 +88,19 @@ public: unsigned int get_chunk_size(unsigned int stripe_width) const override; + [[deprecated]] int encode_chunks(const std::set &want_to_encode, std::map *encoded) override; + int encode_chunks(const shard_id_map &in, + shard_id_map &out) override; + [[deprecated]] int decode_chunks(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override; + int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) override; int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override; @@ -179,5 +186,6 @@ public: int parse(ceph::ErasureCodeProfile &profile, std::ostream *ss) override; }; +static_assert(!std::is_abstract()); #endif diff --git a/src/erasure-code/jerasure/ErasureCodeJerasure.cc b/src/erasure-code/jerasure/ErasureCodeJerasure.cc index 29d4d4b5154..7f6f28d0194 100644 --- a/src/erasure-code/jerasure/ErasureCodeJerasure.cc +++ b/src/erasure-code/jerasure/ErasureCodeJerasure.cc @@ -102,6 +102,7 @@ unsigned int ErasureCodeJerasure::get_chunk_size(unsigned int stripe_width) cons } } +[[deprecated]] int ErasureCodeJerasure::encode_chunks(const set &want_to_encode, map *encoded) { @@ -112,6 +113,46 @@ int ErasureCodeJerasure::encode_chunks(const set &want_to_encode, return 0; } +int ErasureCodeJerasure::encode_chunks(const shard_id_map &in, + shard_id_map &out) +{ + char *chunks[k + m]; //TODO don't use variable length arrays + memset(chunks, 0, sizeof(char*) * (k + m)); + uint64_t size = 0; + + for (auto &&[shard, ptr] : in) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[static_cast(shard)] = const_cast(ptr.c_str()); + } + + for (auto &&[shard, ptr] : out) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[static_cast(shard)] = ptr.c_str(); + } + + char *zeros = nullptr; + + for (shard_id_t i; i < k + m; ++i) { + if (in.contains(i) || out.contains(i)) continue; + + if (zeros == nullptr) { + zeros = (char*)malloc(size); + memset(zeros, 0, size); + } + + chunks[static_cast(i)] = zeros; + } + + jerasure_encode(&chunks[0], &chunks[k], size); + + if (zeros != nullptr) free(zeros); + + return 0; +} + +[[deprecated]] int ErasureCodeJerasure::decode_chunks(const set &want_to_read, 
const map &chunks, map *decoded) @@ -137,6 +178,69 @@ int ErasureCodeJerasure::decode_chunks(const set &want_to_read, return jerasure_decode(erasures, data, coding, blocksize); } +int ErasureCodeJerasure::decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) +{ + unsigned int size = 0; + shard_id_set erasures_set; + shard_id_set to_free; + erasures_set.insert_range(shard_id_t(0), k + m); + int erasures[k + m + 1]; + int erasures_count = 0; + char *data[k]; + char *coding[m]; + memset(data, 0, sizeof(char*) * k); + memset(coding, 0, sizeof(char*) * m); + + for (auto &&[shard, ptr] : in) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + if (shard < k) { + data[static_cast(shard)] = const_cast(ptr.c_str()); + } + else { + coding[static_cast(shard) - k] = const_cast(ptr.c_str()); + } + erasures_set.erase(shard); + } + + for (auto &&[shard, ptr] : out) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + if (shard < k) { + data[static_cast(shard)] = const_cast(ptr.c_str()); + } + else { + coding[static_cast(shard) - k] = const_cast(ptr.c_str()); + } + } + + for (int i = 0; i < k + m; i++) { + char **buf = i < k ? &data[i] : &coding[i - k]; + if (*buf == nullptr) { + *buf = (char *)malloc(size); + to_free.insert(shard_id_t(i)); + } + } + + for (auto && shard : erasures_set) { + erasures[erasures_count++] = static_cast(shard); + } + + + erasures[erasures_count] = -1; + ceph_assert(erasures_count > 0); + int r = jerasure_decode(erasures, data, coding, size); + for (auto & shard : to_free) { + int i = static_cast(shard); + char **buf = i < k ? &data[i] : &coding[i - k]; + free(*buf); + *buf = nullptr; + } + return r; +} + void ErasureCodeJerasure::encode_delta(const bufferptr &old_data, const bufferptr &new_data, bufferptr *delta_maybe_in_place) diff --git a/src/erasure-code/jerasure/ErasureCodeJerasure.h b/src/erasure-code/jerasure/ErasureCodeJerasure.h index 18e54ff9332..e14562668c4 100644 --- a/src/erasure-code/jerasure/ErasureCodeJerasure.h +++ b/src/erasure-code/jerasure/ErasureCodeJerasure.h @@ -64,12 +64,19 @@ public: unsigned int get_chunk_size(unsigned int stripe_width) const override; + [[deprecated]] int encode_chunks(const std::set &want_to_encode, - std::map *encoded) override; + std::map *encoded) override; + int encode_chunks(const shard_id_map &in, + shard_id_map &out) override; + [[deprecated]] int decode_chunks(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override; + int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) override; void encode_delta(const ceph::bufferptr &old_data, const ceph::bufferptr &new_data, diff --git a/src/erasure-code/lrc/ErasureCodeLrc.cc b/src/erasure-code/lrc/ErasureCodeLrc.cc index dc62c450da4..278791de3a0 100644 --- a/src/erasure-code/lrc/ErasureCodeLrc.cc +++ b/src/erasure-code/lrc/ErasureCodeLrc.cc @@ -219,8 +219,10 @@ int ErasureCodeLrc::layers_init(ostream *ss) layer.data.push_back(position); if (*it == 'c') layer.coding.push_back(position); - if (*it == 'c' || *it == 'D') - layer.chunks_as_set.insert(position); + if (*it == 'c' || *it == 'D') { + layer.chunks_as_set.insert(position); + layer.chunks_as_shard_set.insert(shard_id_t(position)); + } position++; } layer.chunks = layer.data; @@ -554,6 +556,12 @@ set ErasureCodeLrc::get_erasures(const set &want, return result; } +shard_id_set ErasureCodeLrc::get_erasures(const shard_id_set &want, + const shard_id_set &available) const +{ + 
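// An erasure is a shard we want that is not in the available set;
+ // shard_id_set::difference(want, available) yields exactly those shards. + 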
return shard_id_set::difference(want, available); +} + unsigned int ErasureCodeLrc::get_chunk_size(unsigned int stripe_width) const { return layers.front().erasure_code->get_chunk_size(stripe_width); @@ -564,8 +572,9 @@ size_t ErasureCodeLrc::get_minimum_granularity() return layers.front().erasure_code->get_minimum_granularity(); } -void p(const set &s) { cerr << s; } // for gdb +void p(const shard_id_set &s) { cerr << s; } // for gdb +[[deprecated]] int ErasureCodeLrc::_minimum_to_decode(const set &want_to_read, const set &available_chunks, set *minimum) @@ -737,6 +746,167 @@ int ErasureCodeLrc::_minimum_to_decode(const set &want_to_read, return -EIO; } +int ErasureCodeLrc::_minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available_chunks, + shard_id_set *minimum) +{ + dout(20) << __func__ << " want_to_read " << want_to_read + << " available_chunks " << available_chunks << dendl; + { + shard_id_set erasures_total; + shard_id_set erasures_not_recovered; + shard_id_set erasures_want; + for (shard_id_t i; i < get_chunk_count(); ++i) { + if (available_chunks.count(i) == 0) { + erasures_total.insert(i); + erasures_not_recovered.insert(i); + if (want_to_read.count(i) != 0) + erasures_want.insert(i); + } + } + + // + // Case 1: + // + // When no chunk is missing there is no need to read more than what + // is wanted. + // + if (erasures_want.empty()) { + *minimum = want_to_read; + dout(20) << __func__ << " minimum == want_to_read == " + << want_to_read << dendl; + return 0; + } + + // + // Case 2: + // + // Try to recover erasures with as few chunks as possible. + // + for (vector::reverse_iterator i = layers.rbegin(); + i != layers.rend(); + ++i) { + // + // If this layer has no chunk that we want, skip it. + // + shard_id_set layer_want; + layer_want = shard_id_set::intersection(want_to_read, i->chunks_as_shard_set); + if (layer_want.empty()) + continue; + // + // Are some of the chunks we want missing ? + // + shard_id_set layer_erasures = shard_id_set::intersection(layer_want, erasures_want); + + shard_id_set layer_minimum; + if (layer_erasures.empty()) { + // + // The chunks we want are available, this is the minimum we need + // to read. + // + layer_minimum = layer_want; + } else { + shard_id_set erasures = shard_id_set::intersection(i->chunks_as_shard_set, erasures_not_recovered); + + if (erasures.size() > i->erasure_code->get_coding_chunk_count()) { + // + // There are too many erasures for this layer to recover: skip + // it and hope that an upper layer will be do better. + // + continue; + } else { + // + // Get all available chunks in that layer to recover the + // missing one(s). + // + layer_minimum = shard_id_set::difference(i->chunks_as_shard_set, erasures_not_recovered); + // + // Chunks recovered by this layer are removed from the list of + // erasures so that upper levels do not attempt to recover + // them. + // + for (shard_id_set::const_iterator j = erasures.begin(); + j != erasures.end(); + ++j) { + erasures_not_recovered.erase(*j); + erasures_want.erase(*j); + } + } + } + minimum->insert(layer_minimum); + } + if (erasures_want.empty()) { + minimum->insert(want_to_read); + for (shard_id_set::const_iterator i = erasures_total.begin(); + i != erasures_total.end(); + ++i) { + if (minimum->count(*i)) + minimum->erase(*i); + } + dout(20) << __func__ << " minimum = " << *minimum << dendl; + return 0; + } + } + + { + // + // Case 3: + // + // The previous strategy failed to recover from all erasures. 
+ // + // Try to recover as many chunks as possible, even from layers + // that do not contain chunks that we want, in the hope that it + // will help the upper layers. + // + shard_id_set erasures_total; + for (shard_id_t i; i < get_chunk_count(); ++i) { + if (available_chunks.count(i) == 0) + erasures_total.insert(i); + } + + for (vector::reverse_iterator i = layers.rbegin(); + i != layers.rend(); + ++i) { + shard_id_set layer_erasures = shard_id_set::intersection(i->chunks_as_shard_set, erasures_total); + + // If this layer has no erasure, skip it + // + if (layer_erasures.empty()) + continue; + + if (layer_erasures.size() > 0 && + layer_erasures.size() <= i->erasure_code->get_coding_chunk_count()) { + // + // chunks recovered by this layer are removed from the list of + // erasures so that upper levels know they can rely on their + // availability + // + for (shard_id_set::const_iterator j = layer_erasures.begin(); + j != layer_erasures.end(); + ++j) { + erasures_total.erase(*j); + } + } + } + if (erasures_total.empty()) { + // + // Do not try to be smart about what chunks are necessary to + // recover, use all available chunks. + // + *minimum = available_chunks; + dout(20) << __func__ << " minimum == available_chunks == " + << available_chunks << dendl; + return 0; + } + } + + derr << __func__ << " not enough chunks in " << available_chunks + << " to read " << want_to_read << dendl; + return -EIO; +} + +IGNORE_DEPRECATED +[[deprecated]] int ErasureCodeLrc::encode_chunks(const set &want_to_encode, map *encoded) { @@ -776,7 +946,63 @@ int ErasureCodeLrc::encode_chunks(const set &want_to_encode, } return 0; } +END_IGNORE_DEPRECATED + +int ErasureCodeLrc::encode_chunks(const shard_id_map &in, + shard_id_map &out) +{ + unsigned int chunk_size = 0; + shard_id_set all_shards; + auto& nonconst_in = const_cast&>(in); + + for (const auto& [shard, ptr] : in) { + all_shards.insert(shard); + if (chunk_size == 0) chunk_size = ptr.length(); + else ceph_assert(chunk_size == ptr.length()); + } + + unsigned int top = layers.size(); + shard_id_set out_shards; + for (const auto& [shard, ptr] : out) { + out_shards.insert(shard); + all_shards.insert(shard); + if (chunk_size == 0) chunk_size = ptr.length(); + else ceph_assert(chunk_size == ptr.length()); + } + + for (vector::reverse_iterator i = layers.rbegin(); i != layers.rend(); ++i) { + --top; + if (i->chunks_as_shard_set.includes(all_shards)) { + break; + } + } + + for (unsigned int i = top; i < layers.size(); ++i) { + const Layer &layer = layers[i]; + shard_id_map layer_in(get_chunk_count()); + shard_id_map layer_out(get_chunk_count()); + shard_id_t j; + for (const auto& c : layer.chunks) { + if (nonconst_in.contains(shard_id_t(c))) + layer_in[j] = nonconst_in[shard_id_t(c)]; + if (out.contains(shard_id_t(c))) + layer_out[j] = out[shard_id_t(c)]; + ++j; + } + int err = layer.erasure_code->encode_chunks(layer_in, layer_out); + + if (err) { + derr << __func__ << " layer " << layer.chunks_map + << " failed with " << err << " trying to encode " + << dendl; + return err; + } + } + return 0; +} +IGNORE_DEPRECATED +[[deprecated]] int ErasureCodeLrc::decode_chunks(const set &want_to_read, const map &chunks, map *decoded) @@ -861,3 +1087,85 @@ int ErasureCodeLrc::decode_chunks(const set &want_to_read, return 0; } } +END_IGNORE_DEPRECATED + +int ErasureCodeLrc::decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) +{ + shard_id_set available_chunks; + shard_id_set erasures; + unsigned int chunk_size = 0; + + for (const auto& 
[shard, ptr] : in) { + if (chunk_size == 0) chunk_size = ptr.length(); + else ceph_assert(chunk_size == ptr.length()); + available_chunks.insert(shard); + } + + for (const auto& [shard, ptr] : out) { + if (chunk_size == 0) chunk_size = ptr.length(); + else ceph_assert(chunk_size == ptr.length()); + erasures.insert(shard); + } + + shard_id_set want_to_read_erasures; + + for (vector::reverse_iterator layer = layers.rbegin(); + layer != layers.rend(); + ++layer) { + shard_id_set layer_erasures = shard_id_set::intersection(layer->chunks_as_shard_set, erasures); + + if (layer_erasures.size() > + layer->erasure_code->get_coding_chunk_count()) { + // skip because there are too many erasures for this layer to recover + } else if(layer_erasures.size() == 0) { + // skip because all chunks are already available + } else { + shard_id_set layer_want_to_read; + shard_id_map layer_in(get_chunk_count()); + shard_id_map layer_out(get_chunk_count()); + shard_id_t j; + for (vector::const_iterator c = layer->chunks.begin(); + c != layer->chunks.end(); + ++c) + { + shard_id_t cs(*c); + if (!erasures.contains(cs)) { + if (in.contains(cs)) layer_in[j] = in[cs]; + else layer_in[j] = out[cs]; + } + else { + layer_out[j] = out[cs]; + } + ++j; + } + int err = layer->erasure_code->decode_chunks(layer_want_to_read, layer_in, layer_out); + if (err) { + derr << __func__ << " layer " << layer->chunks_map + << " failed with " << err << " trying to decode " + << layer_want_to_read << " with " << available_chunks << dendl; + return err; + } + + for (vector::const_iterator c = layer->chunks.begin(); + c != layer->chunks.end(); + ++c) + { + erasures.erase(shard_id_t(*c)); + } + want_to_read_erasures = shard_id_set::intersection(erasures, want_to_read); + if (want_to_read_erasures.size() == 0) + break; + } + } + + if (want_to_read_erasures.size() > 0) { + derr << __func__ << " want to read " << want_to_read + << " with available_chunks = " << available_chunks + << " end up being unable to read " << want_to_read_erasures << dendl; + return -EIO; + } else { + return 0; + } +} diff --git a/src/erasure-code/lrc/ErasureCodeLrc.h b/src/erasure-code/lrc/ErasureCodeLrc.h index 4bc44c32f6e..e988199e7b8 100644 --- a/src/erasure-code/lrc/ErasureCodeLrc.h +++ b/src/erasure-code/lrc/ErasureCodeLrc.h @@ -54,6 +54,7 @@ public: std::vector data; std::vector coding; std::vector chunks; + shard_id_set chunks_as_shard_set; std::set chunks_as_set; std::string chunks_map; ceph::ErasureCodeProfile profile; @@ -84,12 +85,20 @@ public: ~ErasureCodeLrc() override {} + [[deprecated]] std::set get_erasures(const std::set &need, const std::set &available) const; + shard_id_set get_erasures(const shard_id_set &need, + const shard_id_set &available) const; + + [[deprecated]] int _minimum_to_decode(const std::set &want_to_read, const std::set &available, std::set *minimum) override; + int _minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available, + shard_id_set *minimum) override; int create_rule(const std::string &name, CrushWrapper &crush, @@ -113,12 +122,18 @@ public: size_t get_minimum_granularity() override; + [[deprecated]] int encode_chunks(const std::set &want_to_encode, - std::map *encoded) override; - + std::map *encoded) override; + int encode_chunks(const shard_id_map &in, + shard_id_map &out); + [[deprecated]] int decode_chunks(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override; + int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) override; int 
init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override; @@ -142,5 +157,6 @@ public: int layers_sanity_checks(const std::string &description_string, std::ostream *ss) const; }; +static_assert(!std::is_abstract()); #endif diff --git a/src/erasure-code/shec/ErasureCodeShec.cc b/src/erasure-code/shec/ErasureCodeShec.cc index 82a899ad35d..4f908f0114b 100644 --- a/src/erasure-code/shec/ErasureCodeShec.cc +++ b/src/erasure-code/shec/ErasureCodeShec.cc @@ -69,6 +69,8 @@ unsigned int ErasureCodeShec::get_chunk_size(unsigned int stripe_width) const return padded_length / k; } +IGNORE_DEPRECATED +[[deprecated]] int ErasureCodeShec::_minimum_to_decode(const set &want_to_read, const set &available_chunks, set *minimum_chunks) @@ -122,7 +124,60 @@ int ErasureCodeShec::_minimum_to_decode(const set &want_to_read, return 0; } +END_IGNORE_DEPRECATED +int ErasureCodeShec::_minimum_to_decode(const shard_id_set &want_to_read, + const shard_id_set &available_chunks, + shard_id_set *minimum_chunks) +{ + if (!minimum_chunks) return -EINVAL; + + for (shard_id_set::const_iterator it = available_chunks.begin(); it != available_chunks.end(); ++it){ + if (*it < 0 || k+m <= *it) return -EINVAL; + } + + for (shard_id_set::const_iterator it = want_to_read.begin(); it != want_to_read.end(); ++it){ + if (*it < 0 || k+m <= *it) return -EINVAL; + } + + int want[k + m]; + int avails[k + m]; + int minimum[k + m]; + + memset(want, 0, sizeof(want)); + memset(avails, 0, sizeof(avails)); + memset(minimum, 0, sizeof(minimum)); + (*minimum_chunks).clear(); + + for (shard_id_t shard : want_to_read) { + want[static_cast(shard)] = 1; + } + + for (shard_id_t shard : available_chunks) { + avails[static_cast(shard)] = 1; + } + + { + int decoding_matrix[k*k]; + int dm_row[k]; + int dm_column[k]; + memset(decoding_matrix, 0, sizeof(decoding_matrix)); + memset(dm_row, 0, sizeof(dm_row)); + memset(dm_column, 0, sizeof(dm_column)); + if (shec_make_decoding_matrix(true, want, avails, decoding_matrix, dm_row, dm_column, minimum) < 0) { + return -EIO; + } + } + + for (int i = 0; i < k + m; i++) { + if (minimum[i] == 1) minimum_chunks->insert(shard_id_t(i)); + } + + return 0; +} + +IGNORE_DEPRECATED +[[deprecated]] int ErasureCodeShec::minimum_to_decode_with_cost(const set &want_to_read, const map &available, set *minimum_chunks) @@ -136,7 +191,24 @@ int ErasureCodeShec::minimum_to_decode_with_cost(const set &want_to_read, return _minimum_to_decode(want_to_read, available_chunks, minimum_chunks); } +END_IGNORE_DEPRECATED + +int ErasureCodeShec::minimum_to_decode_with_cost(const shard_id_set &want_to_read, + const shard_id_map &available, + shard_id_set *minimum_chunks) +{ + shard_id_set available_chunks; + + for (shard_id_map::const_iterator i = available.begin(); + i != available.end(); + ++i) + available_chunks.insert(i->first); + + return _minimum_to_decode(want_to_read, available_chunks, minimum_chunks); +} +IGNORE_DEPRECATED +[[deprecated]] int ErasureCodeShec::encode(const set &want_to_encode, const bufferlist &in, map *encoded) @@ -160,6 +232,7 @@ int ErasureCodeShec::encode(const set &want_to_encode, return 0; } +[[deprecated]] int ErasureCodeShec::encode_chunks(const set &want_to_encode, map *encoded) { @@ -170,7 +243,49 @@ int ErasureCodeShec::encode_chunks(const set &want_to_encode, shec_encode(&chunks[0], &chunks[k], (*encoded)[0].length()); return 0; } +END_IGNORE_DEPRECATED + +int ErasureCodeShec::encode_chunks(const shard_id_map &in, + shard_id_map &out) +{ + char *chunks[k + m]; //TODO don't use variable length arrays + 
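// chunks[0..k-1] will point at the data shards and chunks[k..k+m-1] at the
+ // coding shards; any shard present in neither map is pointed at a shared
+ // zero buffer below before shec_encode() is called. + 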
memset(chunks, 0, sizeof(char*) * (k + m)); + uint64_t size = 0; + + for (auto &&[shard, ptr] : in) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[static_cast(shard)] = const_cast(ptr.c_str()); + } + + for (auto &&[shard, ptr] : out) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + chunks[static_cast(shard)] = ptr.c_str(); + } + + char *zeros = nullptr; + + for (shard_id_t i; i < k + m; ++i) { + if (in.contains(i) || out.contains(i)) continue; + + if (zeros == nullptr) { + zeros = (char*)malloc(size); + memset(zeros, 0, size); + } + + chunks[static_cast(i)] = zeros; + } + + shec_encode(&chunks[0], &chunks[k], size); + + if (zeros != nullptr) free(zeros); + + return 0; +} +IGNORE_DEPRECATED +[[deprecated]] int ErasureCodeShec::_decode(const set &want_to_read, const map &chunks, map *decoded) @@ -218,6 +333,7 @@ int ErasureCodeShec::_decode(const set &want_to_read, return decode_chunks(want_to_read, chunks, decoded); } +[[deprecated]] int ErasureCodeShec::decode_chunks(const set &want_to_read, const map &chunks, map *decoded) @@ -252,6 +368,57 @@ int ErasureCodeShec::decode_chunks(const set &want_to_read, return 0; } } +END_IGNORE_DEPRECATED + +int ErasureCodeShec::decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) +{ + unsigned int size = 0; + int erased[k + m]; + int erased_count = 0; + int avails[k + m]; + char *data[k]; + char *coding[m]; + + for (auto &&[shard, ptr] : in) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + if (shard < k) { + data[static_cast(shard)] = ptr.c_str(); + } + else { + coding[static_cast(shard) - k] = ptr.c_str(); + } + avails[static_cast(shard)] = 1; + erased[static_cast(shard)] = 0; + } + + for (auto &&[shard, ptr] : out) { + if (size == 0) size = ptr.length(); + else ceph_assert(size == ptr.length()); + if (shard < k) { + data[static_cast(shard)] = ptr.c_str(); + } + else { + coding[static_cast(shard) - k] = ptr.c_str(); + } + avails[static_cast(shard)] = 0; + if (want_to_read.count(shard) > 0) { + erased[static_cast(shard)] = 1; + erased_count++; + } + else { + erased[static_cast(shard)] = 0; + } + } + + if (erased_count > 0) { + return shec_decode(erased, avails, data, coding, size); + } else { + return 0; + } +} // // ErasureCodeShecReedSolomonVandermonde @@ -306,7 +473,7 @@ void ErasureCodeShecReedSolomonVandermonde::apply_delta(const shard_id_map(datashard) + (k * (static_cast(codingshard) - k))], blocksize, output_data, 1); break; case 32: - galois_w32_region_multiply(input_data, matrix[datashard + (k * (codingshard - k))], blocksize, output_data, 1); + galois_w32_region_multiply(input_data, matrix[static_cast(datashard) + (k * (int(codingshard) - k))], blocksize, output_data, 1); break; } } diff --git a/src/erasure-code/shec/ErasureCodeShec.h b/src/erasure-code/shec/ErasureCodeShec.h index cd6025936dc..0af840aef58 100644 --- a/src/erasure-code/shec/ErasureCodeShec.h +++ b/src/erasure-code/shec/ErasureCodeShec.h @@ -56,7 +56,7 @@ public: w(0), DEFAULT_W(8), technique(_technique), - matrix(0) + matrix(nullptr) {} ~ErasureCodeShec() override {} @@ -78,26 +78,50 @@ public: unsigned int get_chunk_size(unsigned int stripe_width) const override; + using ErasureCode::_minimum_to_decode; + [[deprecated]] int _minimum_to_decode(const std::set &want_to_read, const std::set &available_chunks, - std::set *minimum); + std::set *minimum) override; + int _minimum_to_decode(const shard_id_set &want_to_read, + const 
shard_id_set &available_chunks, + shard_id_set *minimum) override; + + [[deprecated]] int minimum_to_decode_with_cost(const std::set &want_to_read, const std::map &available, std::set *minimum) override; + int minimum_to_decode_with_cost(const shard_id_set &want_to_read, + const shard_id_map &available, + shard_id_set *minimum) override; + + using ErasureCode::encode; + [[deprecated]] int encode(const std::set &want_to_encode, const ceph::buffer::list &in, std::map *encoded) override; + + + [[deprecated]] int encode_chunks(const std::set &want_to_encode, std::map *encoded) override; + int encode_chunks(const shard_id_map &in, + shard_id_map &out) override; + using ErasureCode::_decode; + [[deprecated]] int _decode(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override; + [[deprecated]] int decode_chunks(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override; + int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) override; int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override; virtual void shec_encode(char **data, @@ -148,9 +172,9 @@ public: void encode_delta(const ceph::bufferptr &old_data, const ceph::bufferptr &new_data, - ceph::bufferptr *delta_maybe_in_place); - void apply_delta(const shard_id_map &in, - shard_id_map &out); + ceph::bufferptr *delta_maybe_in_place) override; + void apply_delta(const shard_id_map &in, + shard_id_map &out) override; unsigned get_alignment() const override; size_t get_minimum_granularity() override @@ -161,5 +185,6 @@ public: private: int parse(const ceph::ErasureCodeProfile &profile) override; }; +static_assert(!std::is_abstract()); #endif diff --git a/src/osd/ECBackend.cc b/src/osd/ECBackend.cc index e290bb38355..3adc653731e 100644 --- a/src/osd/ECBackend.cc +++ b/src/osd/ECBackend.cc @@ -1124,11 +1124,11 @@ void ECBackend::handle_sub_read( dout(20) << __func__ << ": Checking hash of " << i->first << dendl; bufferhash h(-1); h << bl; - if (h.digest() != hinfo->get_chunk_hash(static_cast(shard))) { + if (h.digest() != hinfo->get_chunk_hash(shard)) { get_parent()->clog_error() << "Bad hash for " << i->first << " digest 0x" - << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(static_cast(shard)) << dec; + << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(shard) << dec; dout(5) << __func__ << ": Bad hash for " << i->first << " digest 0x" - << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(static_cast(shard)) << dec << dendl; + << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(shard) << dec << dendl; r = -EIO; goto error; } @@ -1800,11 +1800,11 @@ int ECBackend::be_deep_scrub( return 0; } - if (hinfo->get_chunk_hash(static_cast(get_parent()->whoami_shard().shard)) != + if (hinfo->get_chunk_hash(get_parent()->whoami_shard().shard) != pos.data_hash.digest()) { dout(0) << "_scan_list " << poid << " got incorrect hash on read 0x" << std::hex << pos.data_hash.digest() << " != expected 0x" - << hinfo->get_chunk_hash(static_cast(get_parent()->whoami_shard().shard)) + << hinfo->get_chunk_hash(get_parent()->whoami_shard().shard) << std::dec << dendl; o.ec_hash_mismatch = true; return 0; @@ -1816,7 +1816,7 @@ int ECBackend::be_deep_scrub( * we match our chunk hash and our recollection of the hash for * chunk 0 matches that of our peers, there is likely no corruption. */ - o.digest = hinfo->get_chunk_hash(0); + o.digest = hinfo->get_chunk_hash(shard_id_t(0)); o.digest_present = true; } else { /* Hack! 
We must be using partial overwrites, and partial overwrites diff --git a/src/osd/ECBackend.h b/src/osd/ECBackend.h index b23214fceaa..71d14a1e9fc 100644 --- a/src/osd/ECBackend.h +++ b/src/osd/ECBackend.h @@ -26,6 +26,11 @@ #include "ExtentCache.h" #include "ECListener.h" +/* This file is soon going to be replaced (before next release), so we are going + * to simply ignore all deprecated warnings. + * */ +IGNORE_DEPRECATED + //forward declaration struct ECSubWrite; struct ECSubWriteReply; @@ -369,6 +374,7 @@ public: have.insert(static_cast(i->shard)); } std::map>> min; + return ec_impl->minimum_to_decode(want, have, &min) == 0; } }; @@ -442,3 +448,5 @@ public: } }; ostream &operator<<(ostream &lhs, const ECBackend::RMWPipeline::pipeline_state_t &rhs); + +END_IGNORE_DEPRECATED diff --git a/src/osd/ECBackendL.h b/src/osd/ECBackendL.h index 0cf0d823c4c..f4f0c3afd50 100644 --- a/src/osd/ECBackendL.h +++ b/src/osd/ECBackendL.h @@ -369,7 +369,9 @@ public: have.insert(static_cast(i->shard)); } std::map>> min; +IGNORE_DEPRECATED return ec_impl->minimum_to_decode(want, have, &min) == 0; +END_IGNORE_DEPRECATED } }; std::unique_ptr get_is_recoverable_predicate() const { diff --git a/src/osd/ECCommon.cc b/src/osd/ECCommon.cc index 91f9b61a750..47870f11791 100644 --- a/src/osd/ECCommon.cc +++ b/src/osd/ECCommon.cc @@ -34,6 +34,11 @@ #undef dout_prefix #define dout_prefix _prefix(_dout, this) +/* This file is soon going to be replaced (before next release), so we are going + * to simply ignore all deprecated warnings. + * */ +IGNORE_DEPRECATED + using std::dec; using std::hex; using std::less; @@ -329,8 +334,8 @@ void ECCommon::ReadPipeline::get_min_want_to_read_shards( const auto distance = std::min(right_chunk_index - left_chunk_index, (uint64_t)sinfo.get_k()); for(uint64_t i = 0; i < distance; i++) { - auto raw_shard = (left_chunk_index + i) % sinfo.get_k(); - want_to_read->insert(sinfo.get_shard(raw_shard)); + raw_shard_id_t raw_shard((left_chunk_index + i) % sinfo.get_k()); + want_to_read->insert(static_cast(sinfo.get_shard(raw_shard))); } } @@ -497,8 +502,8 @@ void ECCommon::ReadPipeline::do_read_op(ReadOp &op) void ECCommon::ReadPipeline::get_want_to_read_shards( std::set *want_to_read) const { - for (int i = 0; i < (int)sinfo.get_k(); ++i) { - want_to_read->insert(sinfo.get_shard(i)); + for (raw_shard_id_t i; i < (int)sinfo.get_k(); ++i) { + want_to_read->insert(static_cast(sinfo.get_shard(i))); } } @@ -561,8 +566,8 @@ struct ClientReadCompleter : ECCommon::ReadCompleter { uint64_t chunk_size = read_pipeline.sinfo.get_chunk_size(); uint64_t trim_offset = 0; for (auto shard : wanted_to_read) { - if (read_pipeline.sinfo.get_raw_shard(shard) * chunk_size < - aligned_offset_in_stripe) { + int s = static_cast(read_pipeline.sinfo.get_raw_shard(shard_id_t(shard))); + if ( s * chunk_size < aligned_offset_in_stripe) { trim_offset += chunk_size; } else { break; @@ -1101,3 +1106,5 @@ ECUtil::HashInfoRef ECCommon::UnstableHashInfoRegistry::get_hash_info( } return ref; } + +END_IGNORE_DEPRECATED diff --git a/src/osd/ECCommonL.cc b/src/osd/ECCommonL.cc index 7bf517381ca..d6aceacebc7 100644 --- a/src/osd/ECCommonL.cc +++ b/src/osd/ECCommonL.cc @@ -53,6 +53,8 @@ using ceph::bufferptr; using ceph::ErasureCodeInterfaceRef; using ceph::Formatter; +IGNORE_DEPRECATED + namespace ECLegacy { static ostream& _prefix(std::ostream *_dout, ECCommonL::RMWPipeline *rmw_pipeline) { return rmw_pipeline->get_parent()->gen_dbg_prefix(*_dout) << "ECCommonL "; @@ -1103,3 +1105,5 @@ ECUtilL::HashInfoRef 
ECCommonL::UnstableHashInfoRegistry::get_hash_info( return ref; } } + +END_IGNORE_DEPRECATED \ No newline at end of file diff --git a/src/osd/ECTypes.h b/src/osd/ECTypes.h index c686e2a4ce8..932d6424ed2 100644 --- a/src/osd/ECTypes.h +++ b/src/osd/ECTypes.h @@ -14,6 +14,9 @@ #pragma once +#include "include/types.h" +#include "common/mini_flat_map.h" + struct ec_align_t { uint64_t offset; uint64_t size; @@ -23,5 +26,49 @@ struct ec_align_t { << rhs.size << "," << rhs.flags; } + ec_align_t(std::pair p, uint32_t flags) + : offset(p.first), size(p.second), flags(flags) {} + ec_align_t(uint64_t offset, uint64_t size, uint32_t flags) + : offset(offset), size(size), flags(flags) {} + bool operator==(const ec_align_t &other) const; +}; + +struct raw_shard_id_t { + int8_t id; + + raw_shard_id_t() : id(0) {} + explicit constexpr raw_shard_id_t(int8_t _id) : id(_id) {} + + explicit constexpr operator int8_t() const { return id; } + // For convenient use in comparisons + explicit constexpr operator int() const { return id; } + explicit constexpr operator uint64_t() const { return id; } + + const static raw_shard_id_t NO_SHARD; + + void encode(ceph::buffer::list &bl) const { + using ceph::encode; + encode(id, bl); + } + void decode(ceph::buffer::list::const_iterator &bl) { + using ceph::decode; + decode(id, bl); + } + void dump(ceph::Formatter *f) const { + f->dump_int("id", id); + } + static void generate_test_instances(std::list& ls) { + ls.push_back(new raw_shard_id_t(1)); + ls.push_back(new raw_shard_id_t(2)); + } + raw_shard_id_t& operator++() { ++id; return *this; } + friend constexpr std::strong_ordering operator<=>(const raw_shard_id_t &lhs, const raw_shard_id_t &rhs) { return lhs.id <=> rhs.id; } + friend constexpr std::strong_ordering operator<=>(int lhs, const raw_shard_id_t &rhs) { return lhs <=> rhs.id; } + friend constexpr std::strong_ordering operator<=>(const raw_shard_id_t &lhs, int rhs) { return lhs.id <=> rhs; } + + raw_shard_id_t& operator=(int other) { id = other; return *this; } + bool operator==(const raw_shard_id_t &other) const { return id == other.id; } }; +template +using shard_id_map = mini_flat_map; diff --git a/src/osd/ECUtil.cc b/src/osd/ECUtil.cc index e7e6faffdf6..b31e4a86c66 100644 --- a/src/osd/ECUtil.cc +++ b/src/osd/ECUtil.cc @@ -6,6 +6,11 @@ #include "include/encoding.h" #include "ECUtil.h" +/* This file is soon going to be replaced (before next release), so we are going + * to simply ignore all deprecated warnings. + * */ +IGNORE_DEPRECATED + using namespace std; using ceph::bufferlist; using ceph::ErasureCodeInterfaceRef; @@ -258,3 +263,5 @@ const string &ECUtil::get_hinfo_key() { return HINFO_KEY; } + +END_IGNORE_DEPRECATED diff --git a/src/osd/ECUtil.h b/src/osd/ECUtil.h index 48da5227306..f39dbae524b 100644 --- a/src/osd/ECUtil.h +++ b/src/osd/ECUtil.h @@ -28,35 +28,35 @@ class stripe_info_t { const uint64_t chunk_size; const unsigned int k; // Can be calculated with a division from above. Better to cache. 
const unsigned int m; - const std::vector chunk_mapping; - const std::vector chunk_mapping_reverse; + const std::vector chunk_mapping; + const std::vector chunk_mapping_reverse; private: - static std::vector complete_chunk_mapping( - std::vector _chunk_mapping, unsigned int n) + static std::vector complete_chunk_mapping( + std::vector _chunk_mapping, unsigned int n) { unsigned int size = _chunk_mapping.size(); - std::vector chunk_mapping(n); - for (unsigned int i = 0; i < n; i++) { + std::vector chunk_mapping(n); + for (shard_id_t i; i < n; ++i) { if (size > i) { - chunk_mapping.at(i) = _chunk_mapping.at(i); + chunk_mapping.at(static_cast(i)) = _chunk_mapping.at(static_cast(i)); } else { - chunk_mapping.at(i) = static_cast(i); + chunk_mapping.at(static_cast(i)) = i; } } return chunk_mapping; } - static std::vector reverse_chunk_mapping( - std::vector chunk_mapping) + static std::vector reverse_chunk_mapping( + std::vector chunk_mapping) { unsigned int size = chunk_mapping.size(); - std::vector reverse(size); - std::vector used(size,false); - for (unsigned int i = 0; i < size; i++) { - int index = chunk_mapping.at(i); + std::vector reverse(size); + shard_id_set used; + for (raw_shard_id_t i; i < size; ++i) { + shard_id_t index = chunk_mapping.at(static_cast(i)); // Mapping must be a bijection and a permutation - ceph_assert(!used.at(index)); - used.at(index) = true; - reverse.at(index) = i; + ceph_assert(!used.contains(index)); + used.insert(index); + reverse.at(static_cast(index)) = i; } return reverse; } @@ -77,12 +77,12 @@ public: chunk_size(stripe_width / k), k(k), m(m), - chunk_mapping(complete_chunk_mapping(std::vector(), k + m)), + chunk_mapping(complete_chunk_mapping(std::vector(), k + m)), chunk_mapping_reverse(reverse_chunk_mapping(chunk_mapping)) { ceph_assert(stripe_width % k == 0); } stripe_info_t(unsigned int k, unsigned int m, uint64_t stripe_width, - std::vector _chunk_mapping) + std::vector _chunk_mapping) : stripe_width(stripe_width), chunk_size(stripe_width / k), k(k), @@ -109,11 +109,11 @@ public: unsigned int get_k_plus_m() const { return k + m; } - int get_shard(unsigned int raw_shard) const { - return chunk_mapping[raw_shard]; + shard_id_t get_shard(raw_shard_id_t raw_shard) const { + return chunk_mapping[static_cast(raw_shard)]; } - unsigned int get_raw_shard(int shard) const { - return chunk_mapping_reverse[shard]; + raw_shard_id_t get_raw_shard(shard_id_t shard) const { + return chunk_mapping_reverse[static_cast(shard)]; } uint64_t logical_to_prev_chunk_offset(uint64_t offset) const { return (offset / stripe_width) * chunk_size; @@ -229,9 +229,9 @@ public: void decode(ceph::buffer::list::const_iterator &bl); void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list& o); - uint32_t get_chunk_hash(int shard) const { - ceph_assert((unsigned)shard < cumulative_shard_hashes.size()); - return cumulative_shard_hashes[shard]; + uint32_t get_chunk_hash(shard_id_t shard) const { + ceph_assert(shard < cumulative_shard_hashes.size()); + return cumulative_shard_hashes[static_cast(shard)]; } uint64_t get_total_chunk_size() const { return total_chunk_size; diff --git a/src/osd/ECUtilL.cc b/src/osd/ECUtilL.cc index b8293bc2b3b..7c13a36f496 100644 --- a/src/osd/ECUtilL.cc +++ b/src/osd/ECUtilL.cc @@ -52,7 +52,9 @@ namespace ECLegacy { chunks[j->first].substr_of(j->second, i, sinfo.get_chunk_size()); } bufferlist bl; +IGNORE_DEPRECATED int r = ec_impl->decode_concat(want_to_read, chunks, &bl); +END_IGNORE_DEPRECATED ceph_assert(r == 0); 
ceph_assert(bl.length() % sinfo.get_chunk_size() == 0); out->claim_append(bl); @@ -89,7 +91,9 @@ namespace ECLegacy { } map>> min; +IGNORE_DEPRECATED int r = ec_impl->minimum_to_decode(need, avail, &min); +END_IGNORE_DEPRECATED ceph_assert(r == 0); int chunks_count = 0; @@ -119,7 +123,9 @@ namespace ECLegacy { repair_data_per_chunk); } map out_bls; + IGNORE_DEPRECATED r = ec_impl->decode(need, chunks, &out_bls, sinfo.get_chunk_size()); + END_IGNORE_DEPRECATED ceph_assert(r == 0); for (auto j = out.begin(); j != out.end(); ++j) { ceph_assert(out_bls.count(j->first)); @@ -153,7 +159,9 @@ namespace ECLegacy { map encoded; bufferlist buf; buf.substr_of(in, i, sinfo.get_stripe_width()); +IGNORE_DEPRECATED int r = ec_impl->encode(want, buf, &encoded); +END_IGNORE_DEPRECATED ceph_assert(r == 0); for (map::iterator i = encoded.begin(); i != encoded.end(); diff --git a/src/osd/ECUtilL.h b/src/osd/ECUtilL.h index 1938edac313..18038b11184 100644 --- a/src/osd/ECUtilL.h +++ b/src/osd/ECUtilL.h @@ -33,13 +33,13 @@ class stripe_info_t { const std::vector chunk_mapping_reverse; private: static std::vector complete_chunk_mapping( - std::vector _chunk_mapping, unsigned int n) + std::vector _chunk_mapping, unsigned int n) { unsigned int size = _chunk_mapping.size(); std::vector chunk_mapping(n); for (unsigned int i = 0; i < n; i++) { if (size > i) { - chunk_mapping.at(i) = _chunk_mapping.at(i); + chunk_mapping.at(i) = int(_chunk_mapping.at(i)); } else { chunk_mapping.at(i) = static_cast(i); } @@ -78,12 +78,12 @@ public: chunk_size(stripe_width / k), k(k), m(m), - chunk_mapping(complete_chunk_mapping(std::vector(), k + m)), + chunk_mapping(complete_chunk_mapping(std::vector(), k + m)), chunk_mapping_reverse(reverse_chunk_mapping(chunk_mapping)) { ceph_assert(stripe_width % k == 0); } stripe_info_t(unsigned int k, unsigned int m, uint64_t stripe_width, - std::vector _chunk_mapping) + std::vector _chunk_mapping) : stripe_width(stripe_width), chunk_size(stripe_width / k), k(k), diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h index ebc1d359ee2..7d043056211 100644 --- a/src/osd/osd_types.h +++ b/src/osd/osd_types.h @@ -147,6 +147,8 @@ typedef interval_set< snapid_t, mempool::osdmap::flat_map> snap_interval_set_t; +using shard_id_set = bitset_set<128, shard_id_t>; +WRITE_CLASS_DENC(shard_id_set) /** * osd request identifier diff --git a/src/test/erasure-code/ErasureCodeExample.h b/src/test/erasure-code/ErasureCodeExample.h index 18247105d7c..11f4e8f5def 100644 --- a/src/test/erasure-code/ErasureCodeExample.h +++ b/src/test/erasure-code/ErasureCodeExample.h @@ -26,12 +26,19 @@ #include "osd/osd_types.h" #include "erasure-code/ErasureCode.h" +// Chunk version is deprecated. 
#define FIRST_DATA_CHUNK 0 #define SECOND_DATA_CHUNK 1 +#define FIRST_DATA_SHARD shard_id_t(0) +#define SECOND_DATA_SHARD shard_id_t(1) #define DATA_CHUNKS 2u +#define DATA_SHARDS 2u #define CODING_CHUNK 2 +#define CODING_SHARD shard_id_t(2) + #define CODING_CHUNKS 1u +#define CODING_SHARDS 1u #define MINIMUM_TO_RECOVER 2u @@ -46,6 +53,8 @@ public: "indep", pg_pool_t::TYPE_ERASURE, ss); } + IGNORE_DEPRECATED + [[deprecated]] int minimum_to_decode_with_cost(const std::set &want_to_read, const std::map &available, std::set *minimum) override { @@ -75,6 +84,37 @@ public: available_chunks.insert(i->first); return _minimum_to_decode(want_to_read, available_chunks, minimum); } + END_IGNORE_DEPRECATED + + int minimum_to_decode_with_cost(const shard_id_set &want_to_read, + const shard_id_map &available, + shard_id_set *minimum) override { + // + // If one chunk is more expensive to fetch than the others, + // recover it instead. For instance, if the cost reflects the + // time it takes for a chunk to be retrieved from a remote + // OSD and if CPU is cheap, it could make sense to recover + // instead of fetching the chunk. + // + shard_id_map c2c(available); + if (c2c.size() > DATA_SHARDS) { + if (c2c[FIRST_DATA_SHARD] > c2c[SECOND_DATA_SHARD] && + c2c[FIRST_DATA_SHARD] > c2c[CODING_SHARD]) + c2c.erase(FIRST_DATA_SHARD); + else if(c2c[SECOND_DATA_SHARD] > c2c[FIRST_DATA_SHARD] && + c2c[SECOND_DATA_SHARD] > c2c[CODING_SHARD]) + c2c.erase(SECOND_DATA_SHARD); + else if(c2c[CODING_SHARD] > c2c[FIRST_DATA_SHARD] && + c2c[CODING_SHARD] > c2c[SECOND_DATA_SHARD]) + c2c.erase(CODING_SHARD); + } + shard_id_set available_chunks; + for (shard_id_map::const_iterator i = c2c.cbegin(); + i != c2c.cend(); + ++i) + available_chunks.insert(i->first); + return _minimum_to_decode(want_to_read, available_chunks, minimum); + } uint64_t get_supported_optimizations() const override { return FLAG_EC_PLUGIN_PARTIAL_READ_OPTIMIZATION; @@ -96,6 +136,7 @@ public: return 1; } + [[deprecated]] int encode(const std::set &want_to_encode, const bufferlist &in, std::map *encoded) override { @@ -134,12 +175,69 @@ public: return 0; } + int encode(const shard_id_set &want_to_encode, + const bufferlist &in, + shard_id_map *encoded) override { + // + // make sure all data chunks have the same length, allocating + // padding if necessary. 
+ // + unsigned int chunk_length = get_chunk_size(in.length()); + bufferlist out(in); + unsigned int width = get_chunk_count() * get_chunk_size(in.length()); + bufferptr pad(width - in.length()); + pad.zero(0, get_data_chunk_count()); + out.push_back(pad); + // + // compute the coding chunk with first chunk ^ second chunk + // + char *p = out.c_str(); + for (unsigned i = 0; i < chunk_length; i++) + p[i + int(CODING_SHARD) * chunk_length] = + p[i + int(FIRST_DATA_SHARD) * chunk_length] ^ + p[i + int(SECOND_DATA_SHARD) * chunk_length]; + // + // populate the bufferlist with bufferptr pointing + // to chunk boundaries + // + const bufferptr &ptr = out.front(); + for (auto j = want_to_encode.begin(); + j != want_to_encode.end(); + ++j) { + bufferlist tmp; + bufferptr chunk(ptr, int(*j) * chunk_length, chunk_length); + tmp.push_back(chunk); + tmp.claim_append((*encoded)[*j]); + (*encoded)[*j].swap(tmp); + } + return 0; + } + + [[deprecated]] int encode_chunks(const std::set &want_to_encode, std::map *encoded) override { ceph_abort(); return 0; } + int encode_chunks(const shard_id_map &in, + shard_id_map &out) override { + ceph_abort(); + return 0; + } + + void encode_delta(const bufferptr &old_data, + const bufferptr &new_data, + bufferptr *delta_maybe_in_place) { + ceph_abort(); + } + + void apply_delta(const shard_id_map &in, + shard_id_map &out) override { + ceph_abort(); + } + + [[deprecated]] int _decode(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override { @@ -186,6 +284,54 @@ public: return 0; } + + int _decode(const shard_id_set &want_to_read, + const shard_id_map &chunks, + shard_id_map *decoded) override { + // + // All chunks have the same size + // + unsigned chunk_length = (*chunks.begin()).second.length(); + for (shard_id_set::const_iterator i = want_to_read.begin(); + i != want_to_read.end(); + ++i) { + if (chunks.find(*i) != chunks.end()) { + // + // If the chunk is available, just copy the bufferptr pointer + // to the decoded argument. + // + (*decoded)[*i] = chunks.find(*i)->second; + } else if(chunks.size() != 2) { + // + // If a chunk is missing and there are not enough chunks + // to recover, abort. + // + return -ERANGE; + } else { + // + // No matter what the missing chunk is, XOR of the other + // two recovers it. 
+ // + shard_id_map::const_iterator k = chunks.begin(); + const char *a = k->second.front().c_str(); + ++k; + const char *b = k->second.front().c_str(); + bufferptr chunk(chunk_length); + char *c = chunk.c_str(); + for (unsigned j = 0; j < chunk_length; j++) { + c[j] = a[j] ^ b[j]; + } + + bufferlist tmp; + tmp.append(chunk); + tmp.claim_append((*decoded)[*i]); + (*decoded)[*i].swap(tmp); + } + } + return 0; + } + + [[deprecated]] int decode_chunks(const std::set &want_to_read, const std::map &chunks, std::map *decoded) override { @@ -193,8 +339,15 @@ public: return 0; } - const std::vector &get_chunk_mapping() const override { - static std::vector mapping; + int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) override { + ceph_abort(); + return 0; + } + + const std::vector &get_chunk_mapping() const override { + static std::vector mapping; return mapping; } diff --git a/src/test/erasure-code/TestErasureCode.cc b/src/test/erasure-code/TestErasureCode.cc index a06f721a20c..0a5ff0f0a79 100644 --- a/src/test/erasure-code/TestErasureCode.cc +++ b/src/test/erasure-code/TestErasureCode.cc @@ -26,13 +26,13 @@ using namespace std; class ErasureCodeTest : public ErasureCode { public: - map encode_chunks_encoded; + shard_id_map encode_chunks_encoded; unsigned int k; unsigned int m; unsigned int chunk_size; ErasureCodeTest(unsigned int _k, unsigned int _m, unsigned int _chunk_size) : - k(_k), m(_m), chunk_size(_chunk_size) {} + encode_chunks_encoded(_k + _m), k(_k), m(_m), chunk_size(_chunk_size) {} ~ErasureCodeTest() override {} int init(ErasureCodeProfile &profile, ostream *ss) override { @@ -46,21 +46,34 @@ public: return chunk_size; } size_t get_minimum_granularity() override { return 1; } + [[deprecated]] int encode_chunks(const set &want_to_encode, map *encoded) override { - encode_chunks_encoded = *encoded; + ceph_abort_msg("Only new API is tested"); + //encode_chunks_encoded = *encoded; return 0; } + int encode_chunks(const shard_id_map &in, + shard_id_map &out) override { + return 0; + } + [[deprecated]] int decode_chunks(const set &want_to_read, const map &chunks, map *decoded) override { ceph_abort_msg("ErasureCode::decode_chunks not implemented"); } + int decode_chunks(const shard_id_set &want_to_read, + shard_id_map &in, + shard_id_map &out) override { + ceph_abort_msg("ErasureCode::decode_chunks not implemented"); + } int create_rule(const string &name, CrushWrapper &crush, ostream *ss) const override { return 0; } }; +static_assert(!std::is_abstract()); /* * If we have a buffer of 5 bytes (X below) and a chunk size of 3 @@ -102,9 +115,8 @@ TEST(ErasureCodeTest, encode_memory_align) unsigned chunk_size = ErasureCode::SIMD_ALIGN * 7; ErasureCodeTest erasure_code(k, m, chunk_size); - set want_to_encode; - for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++) - want_to_encode.insert(i); + shard_id_set want_to_encode; + want_to_encode.insert_range(shard_id_t(0), erasure_code.get_chunk_count()); string data(chunk_size + chunk_size / 2, 'X'); // uses 1.5 chunks out of 3 // make sure nothing is memory aligned bufferptr ptr(buffer::create_aligned(data.length() + 1, ErasureCode::SIMD_ALIGN)); @@ -113,15 +125,15 @@ TEST(ErasureCodeTest, encode_memory_align) ptr.set_length(data.length()); bufferlist in; in.append(ptr); - map encoded; + shard_id_map encoded(k+m); ASSERT_FALSE(in.is_aligned(ErasureCode::SIMD_ALIGN)); ASSERT_EQ(0, erasure_code.encode(want_to_encode, in, &encoded)); - for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++) 
+ for (shard_id_t i; i < erasure_code.get_chunk_count(); ++i) ASSERT_TRUE(encoded[i].is_aligned(ErasureCode::SIMD_ALIGN)); for (unsigned i = 0; i < chunk_size / 2; i++) - ASSERT_EQ(encoded[1][i], 'X'); - ASSERT_NE(encoded[1][chunk_size / 2], 'X'); + ASSERT_EQ(encoded[shard_id_t(1)][i], 'X'); + ASSERT_NE(encoded[shard_id_t(1)][chunk_size / 2], 'X'); } TEST(ErasureCodeTest, encode_misaligned_non_contiguous) @@ -131,8 +143,8 @@ TEST(ErasureCodeTest, encode_misaligned_non_contiguous) unsigned chunk_size = ErasureCode::SIMD_ALIGN * 7; ErasureCodeTest erasure_code(k, m, chunk_size); - set want_to_encode; - for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++) + shard_id_set want_to_encode; + for (shard_id_t i; i < erasure_code.get_chunk_count(); ++i) want_to_encode.insert(i); string data(chunk_size, 'X'); // create a non contiguous bufferlist where the frist and the second @@ -146,7 +158,7 @@ TEST(ErasureCodeTest, encode_misaligned_non_contiguous) bufferptr ptr(buffer::create_aligned(data.length() + 1, ErasureCode::SIMD_ALIGN)); in.append(ptr); } - map encoded; + shard_id_map encoded(k + m); ASSERT_FALSE(in.is_contiguous()); ASSERT_TRUE(in.front().is_aligned(ErasureCode::SIMD_ALIGN)); @@ -154,7 +166,7 @@ TEST(ErasureCodeTest, encode_misaligned_non_contiguous) ASSERT_TRUE(in.back().is_aligned(ErasureCode::SIMD_ALIGN)); ASSERT_FALSE(in.back().is_n_align_sized(chunk_size)); ASSERT_EQ(0, erasure_code.encode(want_to_encode, in, &encoded)); - for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++) { + for (shard_id_t i; i < erasure_code.get_chunk_count(); ++i) { ASSERT_TRUE(encoded[i].is_aligned(ErasureCode::SIMD_ALIGN)); ASSERT_TRUE(encoded[i].is_n_align_sized(chunk_size)); } diff --git a/src/test/erasure-code/TestErasureCodeClay.cc b/src/test/erasure-code/TestErasureCodeClay.cc index a0e6ade8078..2a3d6ca930b 100644 --- a/src/test/erasure-code/TestErasureCodeClay.cc +++ b/src/test/erasure-code/TestErasureCodeClay.cc @@ -24,6 +24,9 @@ #include "common/config_proxy.h" #include "gtest/gtest.h" +// FIXME: Clay is not yet supported in new EC. +IGNORE_DEPRECATED + using namespace std; TEST(ErasureCodeClay, sanity_check_k) @@ -585,6 +588,8 @@ TEST(ErasureCodeClay, create_rule) } } +END_IGNORE_DEPRECATED + /* * Local Variables: * compile-command: "cd ../.. 
; diff --git a/src/test/erasure-code/TestErasureCodeExample.cc b/src/test/erasure-code/TestErasureCodeExample.cc index 9e67b9c9ca7..212c8531a5f 100644 --- a/src/test/erasure-code/TestErasureCodeExample.cc +++ b/src/test/erasure-code/TestErasureCodeExample.cc @@ -20,6 +20,8 @@ #include "global/global_context.h" #include "gtest/gtest.h" +IGNORE_DEPRECATED + using namespace std; TEST(ErasureCodeExample, chunk_size) @@ -32,79 +34,79 @@ TEST(ErasureCodeExample, chunk_size) TEST(ErasureCodeExample, minimum_to_decode) { ErasureCodeExample example; - set available_chunks; - set want_to_read; - want_to_read.insert(1); + shard_id_set available_chunks; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(1)); { - set minimum; + shard_id_set minimum; EXPECT_EQ(-EIO, example._minimum_to_decode(want_to_read, available_chunks, &minimum)); } - available_chunks.insert(0); - available_chunks.insert(2); + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(2)); { - set minimum; + shard_id_set minimum; EXPECT_EQ(0, example._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(available_chunks, minimum); EXPECT_EQ(2u, minimum.size()); - EXPECT_EQ(1u, minimum.count(0)); - EXPECT_EQ(1u, minimum.count(2)); + EXPECT_EQ(1u, minimum.count(shard_id_t(0))); + EXPECT_EQ(1u, minimum.count(shard_id_t(2))); } { - set minimum; - available_chunks.insert(1); + shard_id_set minimum; + available_chunks.insert(shard_id_t(1)); EXPECT_EQ(0, example._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(1u, minimum.size()); - EXPECT_EQ(1u, minimum.count(1)); + EXPECT_EQ(1u, minimum.count(shard_id_t(1))); } } TEST(ErasureCodeExample, minimum_to_decode_with_cost) { ErasureCodeExample example; - map available; - set want_to_read; - want_to_read.insert(1); + shard_id_map available(example.get_chunk_count()); + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(1)); { - set minimum; + shard_id_set minimum; EXPECT_EQ(-EIO, example.minimum_to_decode_with_cost(want_to_read, available, &minimum)); } - available[0] = 1; - available[2] = 1; + available[shard_id_t(0)] = 1; + available[shard_id_t(2)] = 1; { - set minimum; + shard_id_set minimum; EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read, available, &minimum)); EXPECT_EQ(2u, minimum.size()); - EXPECT_EQ(1u, minimum.count(0)); - EXPECT_EQ(1u, minimum.count(2)); + EXPECT_EQ(1u, minimum.count(shard_id_t(0))); + EXPECT_EQ(1u, minimum.count(shard_id_t(2))); } { - set minimum; - available[1] = 1; + shard_id_set minimum; + available[shard_id_t(1)] = 1; EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read, available, &minimum)); EXPECT_EQ(1u, minimum.size()); - EXPECT_EQ(1u, minimum.count(1)); + EXPECT_EQ(1u, minimum.count(shard_id_t(1))); } { - set minimum; - available[1] = 2; + shard_id_set minimum; + available[shard_id_t(1)] = 2; EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read, available, &minimum)); EXPECT_EQ(2u, minimum.size()); - EXPECT_EQ(1u, minimum.count(0)); - EXPECT_EQ(1u, minimum.count(2)); + EXPECT_EQ(1u, minimum.count(shard_id_t(0))); + EXPECT_EQ(1u, minimum.count(shard_id_t(2))); } } @@ -114,59 +116,60 @@ TEST(ErasureCodeExample, encode_decode) bufferlist in; in.append("ABCDE"); - set want_to_encode; + shard_id_set want_to_encode; for(unsigned int i = 0; i < example.get_chunk_count(); i++) - want_to_encode.insert(i); - map encoded; + want_to_encode.insert(shard_id_t(i)); + shard_id_map encoded(example.get_chunk_count()); EXPECT_EQ(0, example.encode(want_to_encode, 
in, &encoded)); EXPECT_EQ(example.get_chunk_count(), encoded.size()); - EXPECT_EQ(example.get_chunk_size(in.length()), encoded[0].length()); - EXPECT_EQ('A', encoded[0][0]); - EXPECT_EQ('B', encoded[0][1]); - EXPECT_EQ('C', encoded[0][2]); - EXPECT_EQ('D', encoded[1][0]); - EXPECT_EQ('E', encoded[1][1]); - EXPECT_EQ('A'^'D', encoded[2][0]); - EXPECT_EQ('B'^'E', encoded[2][1]); - EXPECT_EQ('C'^0, encoded[2][2]); + EXPECT_EQ(example.get_chunk_size(in.length()), encoded[shard_id_t(0)].length()); + EXPECT_EQ('A', encoded[shard_id_t(0)][0]); + EXPECT_EQ('B', encoded[shard_id_t(0)][1]); + EXPECT_EQ('C', encoded[shard_id_t(0)][2]); + EXPECT_EQ('D', encoded[shard_id_t(1)][0]); + EXPECT_EQ('E', encoded[shard_id_t(1)][1]); + EXPECT_EQ('A'^'D', encoded[shard_id_t(2)][0]); + EXPECT_EQ('B'^'E', encoded[shard_id_t(2)][1]); + EXPECT_EQ('C'^0, encoded[shard_id_t(2)][2]); // all chunks are available { int want_to_decode[] = { 0, 1 }; - map decoded; - EXPECT_EQ(0, example._decode(set(want_to_decode, want_to_decode+2), + shard_id_map decoded(example.get_chunk_count()); + EXPECT_EQ(0, example._decode(shard_id_set (want_to_decode, want_to_decode+2), encoded, &decoded)); EXPECT_EQ(2u, decoded.size()); - EXPECT_EQ(3u, decoded[0].length()); - EXPECT_EQ('A', decoded[0][0]); - EXPECT_EQ('B', decoded[0][1]); - EXPECT_EQ('C', decoded[0][2]); - EXPECT_EQ('D', decoded[1][0]); - EXPECT_EQ('E', decoded[1][1]); + EXPECT_EQ(3u, decoded[shard_id_t(0)].length()); + EXPECT_EQ('A', decoded[shard_id_t(0)][0]); + EXPECT_EQ('B', decoded[shard_id_t(0)][1]); + EXPECT_EQ('C', decoded[shard_id_t(0)][2]); + EXPECT_EQ('D', decoded[shard_id_t(1)][0]); + EXPECT_EQ('E', decoded[shard_id_t(1)][1]); } - // one chunk is missing + // one chunk is missing { - map degraded = encoded; - degraded.erase(0); + shard_id_map degraded = encoded; + degraded.erase(shard_id_t(0)); EXPECT_EQ(2u, degraded.size()); int want_to_decode[] = { 0, 1 }; - map decoded; - EXPECT_EQ(0, example._decode(set(want_to_decode, want_to_decode+2), + shard_id_map decoded(example.get_chunk_count()); + EXPECT_EQ(0, example._decode(shard_id_set (want_to_decode, want_to_decode+2), degraded, &decoded)); EXPECT_EQ(2u, decoded.size()); - EXPECT_EQ(3u, decoded[0].length()); - EXPECT_EQ('A', decoded[0][0]); - EXPECT_EQ('B', decoded[0][1]); - EXPECT_EQ('C', decoded[0][2]); - EXPECT_EQ('D', decoded[1][0]); - EXPECT_EQ('E', decoded[1][1]); + EXPECT_EQ(3u, decoded[shard_id_t(0)].length()); + EXPECT_EQ('A', decoded[shard_id_t(0)][0]); + EXPECT_EQ('B', decoded[shard_id_t(0)][1]); + EXPECT_EQ('C', decoded[shard_id_t(0)][2]); + EXPECT_EQ('D', decoded[shard_id_t(1)][0]); + EXPECT_EQ('E', decoded[shard_id_t(1)][1]); } } -TEST(ErasureCodeExample, decode) +IGNORE_DEPRECATED +TEST(ErasureCodeExample, decode_legacy) { ErasureCodeExample example; @@ -219,10 +222,47 @@ TEST(ErasureCodeExample, decode) EXPECT_EQ(out.length(), encoded[0].length()); // cannot recover - map degraded; + map degraded; degraded[2] = encoded[2]; EXPECT_EQ(-ERANGE, example.decode_concat(degraded, &out)); } +END_IGNORE_DEPRECATED + +TEST(ErasureCodeExample, decode) +{ + ErasureCodeExample example; + +#define LARGE_ENOUGH 2048 + bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH)); + in_ptr.zero(); + in_ptr.set_length(0); + const char *payload = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + 
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + in_ptr.append(payload, strlen(payload)); + bufferlist in; + in.push_back(in_ptr); + int want_to_encode[] = { 0, 1, 2 }; + shard_id_map encoded(example.get_chunk_count()); + EXPECT_EQ(0, example.encode(shard_id_set(want_to_encode, want_to_encode+3), + in, + &encoded)); + EXPECT_EQ(3u, encoded.size()); + + // successful decode + bufferlist out; + shard_id_t shard0(0); + shard_id_t shard1(1); + encoded.erase(shard0); + shard_id_map decoded(example.get_chunk_count()); + EXPECT_EQ(0, example.decode(shard_id_set{shard0}, + encoded, &decoded, 0)); + bufferlist usable; + EXPECT_EQ(decoded[shard0].length(), encoded[shard1].length()); +} TEST(ErasureCodeExample, create_rule) { @@ -267,3 +307,4 @@ TEST(ErasureCodeExample, create_rule) * End: */ +END_IGNORE_DEPRECATED diff --git a/src/test/erasure-code/TestErasureCodeIsa.cc b/src/test/erasure-code/TestErasureCodeIsa.cc index 5235c10caba..797090805f5 100644 --- a/src/test/erasure-code/TestErasureCodeIsa.cc +++ b/src/test/erasure-code/TestErasureCodeIsa.cc @@ -31,19 +31,19 @@ ErasureCodeIsaTableCache tcache; class IsaErasureCodeTest : public ::testing::Test { public: - void compare_chunks(bufferlist &in, map &encoded); + void compare_chunks(bufferlist &in, shard_id_map &encoded); void encode_decode(unsigned object_size); }; -void IsaErasureCodeTest::compare_chunks(bufferlist &in, map &encoded) +void IsaErasureCodeTest::compare_chunks(bufferlist &in, shard_id_map &encoded) { unsigned object_size = in.length(); - unsigned chunk_size = encoded[0].length(); + unsigned chunk_size = encoded[shard_id_t(0)].length(); for (unsigned i = 0; i < encoded.size(); i++) { if (i * chunk_size >= object_size) break; int chunk_length = object_size > (i + 1) * chunk_size ? 
chunk_size : object_size - i * chunk_size; - EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + i * chunk_size, chunk_length)); + EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + i * chunk_size, chunk_length)); } } @@ -61,118 +61,118 @@ void IsaErasureCodeTest::encode_decode(unsigned object_size) // may be multiple bufferptr if object_size is larger than CEPH_PAGE_SIZE in.append(payload.c_str(), payload.length()); int want_to_encode[] = {0, 1, 2, 3}; - map encoded; - EXPECT_EQ(0, Isa.encode(set(want_to_encode, want_to_encode + 4), + shard_id_map encoded(Isa.get_chunk_count()); + EXPECT_EQ(0, Isa.encode(shard_id_set(want_to_encode, want_to_encode + 4), in, &encoded)); EXPECT_EQ(4u, encoded.size()); - unsigned chunk_size = encoded[0].length(); + unsigned chunk_size = encoded[shard_id_t(0)].length(); EXPECT_EQ(chunk_size, Isa.get_chunk_size(object_size)); compare_chunks(in, encoded); // all chunks are available { int want_to_decode[] = {0, 1}; - map decoded; - EXPECT_EQ(0, Isa._decode(set(want_to_decode, want_to_decode + 2), + shard_id_map decoded(Isa.get_chunk_count()); + EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 2), encoded, &decoded)); EXPECT_EQ(2u, decoded.size()); - EXPECT_EQ(chunk_size, decoded[0].length()); + EXPECT_EQ(chunk_size, decoded[shard_id_t(0)].length()); compare_chunks(in, decoded); } // one data chunk is missing { - map degraded = encoded; + shard_id_map degraded = encoded; - string enc1(encoded[1].c_str(), chunk_size); + string enc1(encoded[shard_id_t(1)].c_str(), chunk_size); - degraded.erase(1); + degraded.erase(shard_id_t(1)); EXPECT_EQ(3u, degraded.size()); int want_to_decode[] = {1}; - map decoded; - EXPECT_EQ(0, Isa._decode(set(want_to_decode, want_to_decode + 1), + shard_id_map decoded(Isa.get_chunk_count()); + EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 1), degraded, &decoded)); // always decode all, regardless of want_to_decode EXPECT_EQ(4u, decoded.size()); - EXPECT_EQ(chunk_size, decoded[1].length()); - EXPECT_EQ(0, memcmp(decoded[1].c_str(), enc1.c_str(), chunk_size)); + EXPECT_EQ(chunk_size, decoded[shard_id_t(1)].length()); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(1)].c_str(), enc1.c_str(), chunk_size)); } // non-xor coding chunk is missing { - map degraded = encoded; + shard_id_map degraded = encoded; - string enc3(encoded[3].c_str(), chunk_size); + string enc3(encoded[shard_id_t(3)].c_str(), chunk_size); - degraded.erase(3); + degraded.erase(shard_id_t(3)); EXPECT_EQ(3u, degraded.size()); int want_to_decode[] = {3}; - map decoded; - EXPECT_EQ(0, Isa._decode(set(want_to_decode, want_to_decode + 1), + shard_id_map decoded(Isa.get_chunk_count()); + EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 1), degraded, &decoded)); // always decode all, regardless of want_to_decode EXPECT_EQ(4u, decoded.size()); - EXPECT_EQ(chunk_size, decoded[3].length()); - EXPECT_EQ(0, memcmp(decoded[3].c_str(), enc3.c_str(), chunk_size)); + EXPECT_EQ(chunk_size, decoded[shard_id_t(3)].length()); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(3)].c_str(), enc3.c_str(), chunk_size)); } // xor coding chunk is missing { - map degraded = encoded; + shard_id_map degraded = encoded; - string enc2(encoded[2].c_str(), chunk_size); + string enc2(encoded[shard_id_t(2)].c_str(), chunk_size); - degraded.erase(2); + degraded.erase(shard_id_t(2)); EXPECT_EQ(3u, degraded.size()); int want_to_decode[] = {2}; - map decoded; - EXPECT_EQ(0, Isa._decode(set(want_to_decode, want_to_decode + 1), + shard_id_map 
decoded(Isa.get_chunk_count()); + EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 1), degraded, &decoded)); // always decode all, regardless of want_to_decode EXPECT_EQ(4u, decoded.size()); - EXPECT_EQ(chunk_size, decoded[2].length()); - EXPECT_EQ(0, memcmp(decoded[2].c_str(), enc2.c_str(), chunk_size)); + EXPECT_EQ(chunk_size, decoded[shard_id_t(2)].length()); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(2)].c_str(), enc2.c_str(), chunk_size)); } // one data and one coding chunk is missing { - map degraded = encoded; + shard_id_map degraded = encoded; - string enc3(encoded[3].c_str(), chunk_size); + string enc3(encoded[shard_id_t(3)].c_str(), chunk_size); - degraded.erase(1); - degraded.erase(3); + degraded.erase(shard_id_t(1)); + degraded.erase(shard_id_t(3)); EXPECT_EQ(2u, degraded.size()); int want_to_decode[] = {1, 3}; - map decoded; - EXPECT_EQ(0, Isa._decode(set(want_to_decode, want_to_decode + 2), + shard_id_map decoded(Isa.get_chunk_count()); + EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 2), degraded, &decoded)); // always decode all, regardless of want_to_decode EXPECT_EQ(4u, decoded.size()); - EXPECT_EQ(chunk_size, decoded[1].length()); - EXPECT_EQ(0, memcmp(decoded[3].c_str(), enc3.c_str(), chunk_size)); + EXPECT_EQ(chunk_size, decoded[shard_id_t(1)].length()); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(3)].c_str(), enc3.c_str(), chunk_size)); } // two data chunks are missing { - map degraded = encoded; - degraded.erase(0); - degraded.erase(1); + shard_id_map degraded = encoded; + degraded.erase(shard_id_t(0)); + degraded.erase(shard_id_t(1)); EXPECT_EQ(2u, degraded.size()); int want_to_decode[] = {0, 1}; - map decoded; - EXPECT_EQ(0, Isa._decode(set(want_to_decode, want_to_decode + 2), + shard_id_map decoded(Isa.get_chunk_count()); + EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 2), degraded, &decoded)); // always decode all, regardless of want_to_decode EXPECT_EQ(4u, decoded.size()); - EXPECT_EQ(chunk_size, decoded[0].length()); + EXPECT_EQ(chunk_size, decoded[shard_id_t(0)].length()); compare_chunks(in, decoded); } @@ -200,9 +200,9 @@ TEST_F(IsaErasureCodeTest, minimum_to_decode) // If trying to read nothing, the minimum is empty. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read, available_chunks, @@ -213,11 +213,11 @@ TEST_F(IsaErasureCodeTest, minimum_to_decode) // There is no way to read a chunk if none are available. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(0); + want_to_read.insert(shard_id_t(0)); EXPECT_EQ(-EIO, Isa._minimum_to_decode(want_to_read, available_chunks, @@ -227,12 +227,12 @@ TEST_F(IsaErasureCodeTest, minimum_to_decode) // Reading a subset of the available chunks is always possible. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(0); - available_chunks.insert(0); + want_to_read.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(0)); EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read, available_chunks, @@ -244,13 +244,13 @@ TEST_F(IsaErasureCodeTest, minimum_to_decode) // chunks available. 
// { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(0); - want_to_read.insert(1); - available_chunks.insert(0); + want_to_read.insert(shard_id_t(0)); + want_to_read.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(0)); EXPECT_EQ(-EIO, Isa._minimum_to_decode(want_to_read, available_chunks, @@ -266,21 +266,21 @@ TEST_F(IsaErasureCodeTest, minimum_to_decode) // of CPU and memory. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(1); - want_to_read.insert(3); - available_chunks.insert(0); - available_chunks.insert(2); - available_chunks.insert(3); + want_to_read.insert(shard_id_t(1)); + want_to_read.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(2)); + available_chunks.insert(shard_id_t(3)); EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(2u, minimum.size()); - EXPECT_EQ(0u, minimum.count(3)); + EXPECT_EQ(0u, minimum.count(shard_id_t(3))); } } @@ -333,16 +333,16 @@ TEST_F(IsaErasureCodeTest, encode) // it is not properly aligned, it is padded with zeros. // bufferlist in; - map encoded; + shard_id_map encoded(Isa.get_chunk_count()); int want_to_encode[] = { 0, 1, 2, 3 }; int trail_length = 1; in.append(string(aligned_object_size + trail_length, 'X')); - EXPECT_EQ(0, Isa.encode(set(want_to_encode, want_to_encode+4), + EXPECT_EQ(0, Isa.encode(shard_id_set(want_to_encode, want_to_encode+4), in, &encoded)); EXPECT_EQ(4u, encoded.size()); - char *last_chunk = encoded[1].c_str(); - int length =encoded[1].length(); + char *last_chunk = encoded[shard_id_t(1)].c_str(); + int length =encoded[shard_id_t(1)].length(); EXPECT_EQ('X', last_chunk[0]); EXPECT_EQ('\0', last_chunk[length - trail_length]); } @@ -357,9 +357,9 @@ TEST_F(IsaErasureCodeTest, encode) // valgrind (there is no leak). 
// bufferlist in; - map encoded; - set want_to_encode; - want_to_encode.insert(0); + shard_id_map encoded(Isa.get_chunk_count()); + shard_id_set want_to_encode; + want_to_encode.insert(shard_id_t(0)); int trail_length = 1; in.append(string(aligned_object_size + trail_length, 'X')); EXPECT_EQ(0, Isa.encode(want_to_encode, in, &encoded)); @@ -379,9 +379,9 @@ TEST_F(IsaErasureCodeTest, sanity_check_k) } bool -DecodeAndVerify(ErasureCodeIsaDefault& Isa, map °raded, set want_to_decode, buffer::ptr* enc, int length) +DecodeAndVerify(ErasureCodeIsaDefault& Isa, shard_id_map °raded, shard_id_set want_to_decode, buffer::ptr* enc, int length) { - map decoded; + shard_id_map decoded(Isa.get_chunk_count()); bool ok; // decode as requested @@ -391,7 +391,7 @@ DecodeAndVerify(ErasureCodeIsaDefault& Isa, map °raded, set< for (int i = 0; i < (int) decoded.size(); i++) { // compare all the buffers with their original - ok |= memcmp(decoded[i].c_str(), enc[i].c_str(), length); + ok |= memcmp(decoded[shard_id_t(i)].c_str(), enc[i].c_str(), length); } return ok; @@ -445,11 +445,11 @@ TEST_F(IsaErasureCodeTest, isa_vandermonde_exhaustive) bufferlist in; in.push_back(in_ptr); - setwant_to_encode; + shard_id_set want_to_encode; - map encoded; - for (int i = 0; i < (k + m); i++) { - want_to_encode.insert(i); + shard_id_map encoded(Isa.get_chunk_count()); + for (shard_id_t i; i < (k + m); ++i) { + want_to_encode.insert(shard_id_t(i)); } @@ -459,10 +459,10 @@ TEST_F(IsaErasureCodeTest, isa_vandermonde_exhaustive) EXPECT_EQ((unsigned) (k + m), encoded.size()); - unsigned length = encoded[0].length(); + unsigned length = encoded[shard_id_t(0)].length(); for (int i = 0; i < k; i++) { - EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length)); + EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length)); } buffer::ptr enc[k + m]; @@ -473,7 +473,7 @@ TEST_F(IsaErasureCodeTest, isa_vandermonde_exhaustive) enc[i] = newenc; enc[i].zero(); enc[i].set_length(0); - enc[i].append(encoded[i].c_str(), length); + enc[i].append(encoded[shard_id_t(i)].c_str(), length); } } @@ -481,43 +481,43 @@ TEST_F(IsaErasureCodeTest, isa_vandermonde_exhaustive) int cnt_cf = 0; for (int l1 = 0; l1 < (k + m); l1++) { - map degraded = encoded; - set want_to_decode; + shard_id_map degraded = encoded; + shard_id_set want_to_decode; bool err; - degraded.erase(l1); - want_to_decode.insert(l1); + degraded.erase(shard_id_t(l1)); + want_to_decode.insert(shard_id_t(l1)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l2 = l1 + 1; l2 < (k + m); l2++) { - degraded.erase(l2); - want_to_decode.insert(l2); + degraded.erase(shard_id_t(l2)); + want_to_decode.insert(shard_id_t(l2)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l3 = l2 + 1; l3 < (k + m); l3++) { - degraded.erase(l3); - want_to_decode.insert(l3); + degraded.erase(shard_id_t(l3)); + want_to_decode.insert(shard_id_t(l3)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l4 = l3 + 1; l4 < (k + m); l4++) { - degraded.erase(l4); - want_to_decode.insert(l4); + degraded.erase(shard_id_t(l4)); + want_to_decode.insert(shard_id_t(l4)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); - degraded[l4] = encoded[l4]; - want_to_decode.erase(l4); + degraded[shard_id_t(l4)] = encoded[shard_id_t(l4)]; + want_to_decode.erase(shard_id_t(l4)); cnt_cf++; } - 
degraded[l3] = encoded[l3]; - want_to_decode.erase(l3); + degraded[shard_id_t(l3)] = encoded[shard_id_t(l3)]; + want_to_decode.erase(shard_id_t(l3)); } - degraded[l2] = encoded[l2]; - want_to_decode.erase(l2); + degraded[shard_id_t(l2)] = encoded[shard_id_t(l2)]; + want_to_decode.erase(shard_id_t(l2)); } - degraded[l1] = encoded[l1]; - want_to_decode.erase(l1); + degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)]; + want_to_decode.erase(shard_id_t(l1)); } EXPECT_EQ(2516, cnt_cf); EXPECT_EQ(2506, tcache.getDecodingTableCacheSize()); // 3 entries from (2,2) test and 2503 from (12,4) @@ -572,11 +572,11 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_exhaustive) bufferlist in; in.push_back(in_ptr); - setwant_to_encode; + shard_id_set want_to_encode; - map encoded; + shard_id_map encoded(Isa.get_chunk_count()); for (int i = 0; i < (k + m); i++) { - want_to_encode.insert(i); + want_to_encode.insert(shard_id_t(i)); } @@ -586,10 +586,10 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_exhaustive) EXPECT_EQ((unsigned) (k + m), encoded.size()); - unsigned length = encoded[0].length(); + unsigned length = encoded[shard_id_t(0)].length(); for (int i = 0; i < k; i++) { - EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length)); + EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length)); } buffer::ptr enc[k + m]; @@ -600,7 +600,7 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_exhaustive) enc[i] = newenc; enc[i].zero(); enc[i].set_length(0); - enc[i].append(encoded[i].c_str(), length); + enc[i].append(encoded[shard_id_t(i)].c_str(), length); } } @@ -608,43 +608,43 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_exhaustive) int cnt_cf = 0; for (int l1 = 0; l1 < (k + m); l1++) { - map degraded = encoded; - set want_to_decode; + shard_id_map degraded = encoded; + shard_id_set want_to_decode; bool err; - degraded.erase(l1); - want_to_decode.insert(l1); + degraded.erase(shard_id_t(l1)); + want_to_decode.insert(shard_id_t(l1)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l2 = l1 + 1; l2 < (k + m); l2++) { - degraded.erase(l2); - want_to_decode.insert(l2); + degraded.erase(shard_id_t(l2)); + want_to_decode.insert(shard_id_t(l2)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l3 = l2 + 1; l3 < (k + m); l3++) { - degraded.erase(l3); - want_to_decode.insert(l3); + degraded.erase(shard_id_t(l3)); + want_to_decode.insert(shard_id_t(l3)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l4 = l3 + 1; l4 < (k + m); l4++) { - degraded.erase(l4); - want_to_decode.insert(l4); + degraded.erase(shard_id_t(l4)); + want_to_decode.insert(shard_id_t(l4)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); - degraded[l4] = encoded[l4]; - want_to_decode.erase(l4); + degraded[shard_id_t(l4)] = encoded[shard_id_t(l4)]; + want_to_decode.erase(shard_id_t(l4)); cnt_cf++; } - degraded[l3] = encoded[l3]; - want_to_decode.erase(l3); + degraded[shard_id_t(l3)] = encoded[shard_id_t(l3)]; + want_to_decode.erase(shard_id_t(l3)); } - degraded[l2] = encoded[l2]; - want_to_decode.erase(l2); + degraded[shard_id_t(l2)] = encoded[shard_id_t(l2)]; + want_to_decode.erase(shard_id_t(l2)); } - degraded[l1] = encoded[l1]; - want_to_decode.erase(l1); + degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)]; + want_to_decode.erase(shard_id_t(l1)); } EXPECT_EQ(2516, cnt_cf); EXPECT_EQ(2516, 
tcache.getDecodingTableCacheSize(ErasureCodeIsaDefault::kCauchy)); @@ -699,11 +699,11 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_cache_trash) bufferlist in; in.push_back(in_ptr); - setwant_to_encode; + shard_id_set want_to_encode; - map encoded; + shard_id_map encoded(Isa.get_chunk_count()); for (int i = 0; i < (k + m); i++) { - want_to_encode.insert(i); + want_to_encode.insert(shard_id_t(i)); } @@ -713,10 +713,10 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_cache_trash) EXPECT_EQ((unsigned) (k + m), encoded.size()); - unsigned length = encoded[0].length(); + unsigned length = encoded[shard_id_t(0)].length(); for (int i = 0; i < k; i++) { - EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length)); + EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length)); } buffer::ptr enc[k + m]; @@ -727,7 +727,7 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_cache_trash) enc[i] = newenc; enc[i].zero(); enc[i].set_length(0); - enc[i].append(encoded[i].c_str(), length); + enc[i].append(encoded[shard_id_t(i)].c_str(), length); } } @@ -735,43 +735,43 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_cache_trash) int cnt_cf = 0; for (int l1 = 0; l1 < (k + m); l1++) { - map degraded = encoded; - set want_to_decode; + shard_id_map degraded = encoded; + shard_id_set want_to_decode; bool err; - degraded.erase(l1); - want_to_decode.insert(l1); + degraded.erase(shard_id_t(l1)); + want_to_decode.insert(shard_id_t(l1)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l2 = l1 + 1; l2 < (k + m); l2++) { - degraded.erase(l2); - want_to_decode.insert(l2); + degraded.erase(shard_id_t(l2)); + want_to_decode.insert(shard_id_t(l2)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l3 = l2 + 1; l3 < (k + m); l3++) { - degraded.erase(l3); - want_to_decode.insert(l3); + degraded.erase(shard_id_t(l3)); + want_to_decode.insert(shard_id_t(l3)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; for (int l4 = l3 + 1; l4 < (k + m); l4++) { - degraded.erase(l4); - want_to_decode.insert(l4); + degraded.erase(shard_id_t(l4)); + want_to_decode.insert(shard_id_t(l4)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); - degraded[l4] = encoded[l4]; - want_to_decode.erase(l4); + degraded[shard_id_t(l4)] = encoded[shard_id_t(l4)]; + want_to_decode.erase(shard_id_t(l4)); cnt_cf++; } - degraded[l3] = encoded[l3]; - want_to_decode.erase(l3); + degraded[shard_id_t(l3)] = encoded[shard_id_t(l3)]; + want_to_decode.erase(shard_id_t(l3)); } - degraded[l2] = encoded[l2]; - want_to_decode.erase(l2); + degraded[shard_id_t(l2)] = encoded[shard_id_t(l2)]; + want_to_decode.erase(shard_id_t(l2)); } - degraded[l1] = encoded[l1]; - want_to_decode.erase(l1); + degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)]; + want_to_decode.erase(shard_id_t(l1)); } EXPECT_EQ(6195, cnt_cf); EXPECT_EQ(2516, tcache.getDecodingTableCacheSize(ErasureCodeIsaDefault::kCauchy)); @@ -825,11 +825,11 @@ TEST_F(IsaErasureCodeTest, isa_xor_codec) bufferlist in; in.push_back(in_ptr); - setwant_to_encode; + shard_id_set want_to_encode; - map encoded; + shard_id_map encoded(Isa.get_chunk_count()); for (int i = 0; i < (k + m); i++) { - want_to_encode.insert(i); + want_to_encode.insert(shard_id_t(i)); } @@ -839,10 +839,10 @@ TEST_F(IsaErasureCodeTest, isa_xor_codec) EXPECT_EQ((unsigned) (k + m), encoded.size()); - unsigned length = encoded[0].length(); + unsigned length 
= encoded[shard_id_t(0)].length(); for (int i = 0; i < k; i++) { - EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length)); + EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length)); } buffer::ptr enc[k + m]; @@ -853,7 +853,7 @@ TEST_F(IsaErasureCodeTest, isa_xor_codec) enc[i] = newenc; enc[i].zero(); enc[i].set_length(0); - enc[i].append(encoded[i].c_str(), length); + enc[i].append(encoded[shard_id_t(i)].c_str(), length); } } @@ -861,16 +861,16 @@ TEST_F(IsaErasureCodeTest, isa_xor_codec) int cnt_cf = 0; for (int l1 = 0; l1 < (k + m); l1++) { - map degraded = encoded; - set want_to_decode; + shard_id_map degraded = encoded; + shard_id_set want_to_decode; bool err; - degraded.erase(l1); - want_to_decode.insert(l1); + degraded.erase(shard_id_t(l1)); + want_to_decode.insert(shard_id_t(l1)); err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length); EXPECT_EQ(0, err); cnt_cf++; - degraded[l1] = encoded[l1]; - want_to_decode.erase(l1); + degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)]; + want_to_decode.erase(shard_id_t(l1)); } EXPECT_EQ(5, cnt_cf); } diff --git a/src/test/erasure-code/TestErasureCodeJerasure.cc b/src/test/erasure-code/TestErasureCodeJerasure.cc index 3946892c8aa..95490cd0cb0 100644 --- a/src/test/erasure-code/TestErasureCodeJerasure.cc +++ b/src/test/erasure-code/TestErasureCodeJerasure.cc @@ -84,76 +84,49 @@ TYPED_TEST(ErasureCodeTest, encode_decode) bufferlist in; in.push_back(in_ptr); int want_to_encode[] = { 0, 1, 2, 3 }; - map encoded; - EXPECT_EQ(0, jerasure.encode(set(want_to_encode, want_to_encode+4), + shard_id_map< bufferlist> encoded(jerasure.get_chunk_count()); + EXPECT_EQ(0, jerasure.encode(shard_id_set(want_to_encode, want_to_encode+4), in, &encoded)); EXPECT_EQ(4u, encoded.size()); - unsigned length = encoded[0].length(); - EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), length)); - EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str() + length, + unsigned length = encoded[shard_id_t(0)].length(); + EXPECT_EQ(0, memcmp(encoded[shard_id_t(0)].c_str(), in.c_str(), length)); + EXPECT_EQ(0, memcmp(encoded[shard_id_t(1)].c_str(), in.c_str() + length, in.length() - length)); // all chunks are available { int want_to_decode[] = { 0, 1 }; - map decoded; - EXPECT_EQ(0, jerasure._decode(set(want_to_decode, want_to_decode+2), + shard_id_map< bufferlist> decoded(jerasure.get_chunk_count()); + EXPECT_EQ(0, jerasure._decode(shard_id_set(want_to_decode, want_to_decode+2), encoded, &decoded)); EXPECT_EQ(2u, decoded.size()); - EXPECT_EQ(length, decoded[0].length()); - EXPECT_EQ(0, memcmp(decoded[0].c_str(), in.c_str(), length)); - EXPECT_EQ(0, memcmp(decoded[1].c_str(), in.c_str() + length, + EXPECT_EQ(length, decoded[shard_id_t(0)].length()); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(0)].c_str(), in.c_str(), length)); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(1)].c_str(), in.c_str() + length, in.length() - length)); } // two chunks are missing { - map degraded = encoded; - degraded.erase(0); - degraded.erase(1); + shard_id_map< bufferlist> degraded = encoded; + degraded.erase(shard_id_t(0)); + degraded.erase(shard_id_t(1)); EXPECT_EQ(2u, degraded.size()); int want_to_decode[] = { 0, 1 }; - map decoded; - EXPECT_EQ(0, jerasure._decode(set(want_to_decode, want_to_decode+2), + shard_id_map< bufferlist> decoded(jerasure.get_chunk_count()); + EXPECT_EQ(0, jerasure._decode(shard_id_set(want_to_decode, want_to_decode+2), degraded, &decoded)); // always decode all, regardless of want_to_decode EXPECT_EQ(4u, decoded.size()); 
- EXPECT_EQ(length, decoded[0].length()); - EXPECT_EQ(0, memcmp(decoded[0].c_str(), in.c_str(), length)); - EXPECT_EQ(0, memcmp(decoded[1].c_str(), in.c_str() + length, + EXPECT_EQ(length, decoded[shard_id_t(0)].length()); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(0)].c_str(), in.c_str(), length)); + EXPECT_EQ(0, memcmp(decoded[shard_id_t(1)].c_str(), in.c_str() + length, in.length() - length)); } - - // partial decode with the exact-sized decode_concat() - { - map partial_decode = encoded; - // we have everything but want only the first chunk - set partial_want_to_read = { 0 }; - EXPECT_EQ(1u, partial_want_to_read.size()); - bufferlist out; - EXPECT_EQ(0, jerasure.decode_concat(partial_want_to_read, - partial_decode, - &out)); - EXPECT_EQ(out.length(), partial_decode[0].length()); - } - - // partial degraded decode with the exact-sized decode_concat() - { - map partial_decode = encoded; - // we have everything but what we really want - partial_decode.erase(0); - set partial_want_to_read = { 0 }; - EXPECT_EQ(1u, partial_want_to_read.size()); - bufferlist out; - EXPECT_EQ(0, jerasure.decode_concat(partial_want_to_read, - partial_decode, - &out)); - EXPECT_EQ(out.length(), encoded[0].length()); - } } } @@ -171,9 +144,9 @@ TYPED_TEST(ErasureCodeTest, minimum_to_decode) // If trying to read nothing, the minimum is empty. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read, available_chunks, @@ -184,11 +157,11 @@ TYPED_TEST(ErasureCodeTest, minimum_to_decode) // There is no way to read a chunk if none are available. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(0); + want_to_read.insert(shard_id_t(0)); EXPECT_EQ(-EIO, jerasure._minimum_to_decode(want_to_read, available_chunks, @@ -198,12 +171,12 @@ TYPED_TEST(ErasureCodeTest, minimum_to_decode) // Reading a subset of the available chunks is always possible. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(0); - available_chunks.insert(0); + want_to_read.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(0)); EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read, available_chunks, @@ -215,13 +188,13 @@ TYPED_TEST(ErasureCodeTest, minimum_to_decode) // chunks available. // { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(0); - want_to_read.insert(1); - available_chunks.insert(0); + want_to_read.insert(shard_id_t(0)); + want_to_read.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(0)); EXPECT_EQ(-EIO, jerasure._minimum_to_decode(want_to_read, available_chunks, @@ -237,21 +210,21 @@ TYPED_TEST(ErasureCodeTest, minimum_to_decode) // of CPU and memory. 
// { - set want_to_read; - set available_chunks; - set minimum; + shard_id_set want_to_read; + shard_id_set available_chunks; + shard_id_set minimum; - want_to_read.insert(1); - want_to_read.insert(3); - available_chunks.insert(0); - available_chunks.insert(2); - available_chunks.insert(3); + want_to_read.insert(shard_id_t(1)); + want_to_read.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(2)); + available_chunks.insert(shard_id_t(3)); EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(2u, minimum.size()); - EXPECT_EQ(0u, minimum.count(3)); + EXPECT_EQ(0u, minimum.count(shard_id_t(3))); } } @@ -271,16 +244,16 @@ TEST(ErasureCodeTest, encode) // it is not properly aligned, it is padded with zeros. // bufferlist in; - map encoded; + shard_id_map encoded(jerasure.get_chunk_count()); int want_to_encode[] = { 0, 1, 2, 3 }; int trail_length = 1; in.append(string(aligned_object_size + trail_length, 'X')); - EXPECT_EQ(0, jerasure.encode(set(want_to_encode, want_to_encode+4), + EXPECT_EQ(0, jerasure.encode(shard_id_set(want_to_encode, want_to_encode+4), in, &encoded)); EXPECT_EQ(4u, encoded.size()); - char *last_chunk = encoded[1].c_str(); - int length =encoded[1].length(); + char *last_chunk = encoded[shard_id_t(1)].c_str(); + int length =encoded[shard_id_t(1)].length(); EXPECT_EQ('X', last_chunk[0]); EXPECT_EQ('\0', last_chunk[length - trail_length]); } @@ -295,9 +268,9 @@ TEST(ErasureCodeTest, encode) // valgrind (there is no leak). // bufferlist in; - map encoded; - set want_to_encode; - want_to_encode.insert(0); + shard_id_map encoded(jerasure.get_chunk_count()); + shard_id_set want_to_encode; + want_to_encode.insert(shard_id_t(0)); int trail_length = 1; in.append(string(aligned_object_size + trail_length, 'X')); EXPECT_EQ(0, jerasure.encode(want_to_encode, in, &encoded)); diff --git a/src/test/erasure-code/TestErasureCodeLrc.cc b/src/test/erasure-code/TestErasureCodeLrc.cc index 6bfc25e7f0c..9ffd9a7e004 100644 --- a/src/test/erasure-code/TestErasureCodeLrc.cc +++ b/src/test/erasure-code/TestErasureCodeLrc.cc @@ -465,12 +465,12 @@ TEST(ErasureCodeLrc, minimum_to_decode) "]"; profile["layers"] = description_string; EXPECT_EQ(0, lrc.init(profile, &cerr)); - set want_to_read; - want_to_read.insert(1); - set available_chunks; - available_chunks.insert(1); - available_chunks.insert(2); - set minimum; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(1)); + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(2)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(want_to_read, minimum); } @@ -493,34 +493,34 @@ TEST(ErasureCodeLrc, minimum_to_decode) lrc.get_chunk_count()); { // want to read the last chunk - set want_to_read; - want_to_read.insert(lrc.get_chunk_count() - 1); + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(lrc.get_chunk_count() - 1)); // all chunks are available except the last chunk - set available_chunks; + shard_id_set available_chunks; for (int i = 0; i < (int)lrc.get_chunk_count() - 1; i++) - available_chunks.insert(i); + available_chunks.insert(shard_id_t(i)); // _____DDDDc can recover c - set minimum; + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); - set expected_minimum; - expected_minimum.insert(5); - expected_minimum.insert(6); - expected_minimum.insert(7); - 
expected_minimum.insert(8); + shard_id_set expected_minimum; + expected_minimum.insert(shard_id_t(5)); + expected_minimum.insert(shard_id_t(6)); + expected_minimum.insert(shard_id_t(7)); + expected_minimum.insert(shard_id_t(8)); EXPECT_EQ(expected_minimum, minimum); } { - set want_to_read; - want_to_read.insert(0); - set available_chunks; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(0)); + shard_id_set available_chunks; for (int i = 1; i < (int)lrc.get_chunk_count(); i++) - available_chunks.insert(i); - set minimum; + available_chunks.insert(shard_id_t(i)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); - set expected_minimum; - expected_minimum.insert(2); - expected_minimum.insert(3); - expected_minimum.insert(4); + shard_id_set expected_minimum; + expected_minimum.insert(shard_id_t(2)); + expected_minimum.insert(shard_id_t(3)); + expected_minimum.insert(shard_id_t(4)); EXPECT_EQ(expected_minimum, minimum); } } @@ -540,23 +540,23 @@ TEST(ErasureCodeLrc, minimum_to_decode) EXPECT_EQ(0, lrc.init(profile, &cerr)); EXPECT_EQ(profile["mapping"].length(), lrc.get_chunk_count()); - set want_to_read; - want_to_read.insert(8); + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(8)); // // unable to recover, too many chunks missing // { - set available_chunks; - available_chunks.insert(0); - available_chunks.insert(1); + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(1)); // missing (2) // missing (3) - available_chunks.insert(4); - available_chunks.insert(5); - available_chunks.insert(6); + available_chunks.insert(shard_id_t(4)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(6)); // missing (7) // missing (8) - set minimum; + shard_id_set minimum; EXPECT_EQ(-EIO, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); } // @@ -585,17 +585,17 @@ TEST(ErasureCodeLrc, minimum_to_decode) // _cDDD_cDD success: recovers chunk 7, 8 // { - set available_chunks; - available_chunks.insert(0); - available_chunks.insert(1); + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(1)); // missing (2) - available_chunks.insert(3); - available_chunks.insert(4); - available_chunks.insert(5); - available_chunks.insert(6); + available_chunks.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(4)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(6)); // missing (7) // missing (8) - set minimum; + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(available_chunks, minimum); } @@ -620,117 +620,124 @@ TEST(ErasureCodeLrc, encode_decode) unsigned int chunk_size = g_conf().get_val("osd_pool_erasure_code_stripe_unit"); unsigned int stripe_width = lrc.get_data_chunk_count() * chunk_size; EXPECT_EQ(chunk_size, lrc.get_chunk_size(stripe_width)); - set want_to_encode; - map encoded; + shard_id_set want_to_encode; + shard_id_map encoded(lrc.get_chunk_count()); for (unsigned int i = 0; i < lrc.get_chunk_count(); ++i) { - want_to_encode.insert(i); + want_to_encode.insert(shard_id_t(i)); bufferptr ptr(buffer::create_page_aligned(chunk_size)); bufferlist tmp; tmp.push_back(ptr); - tmp.claim_append(encoded[i]); - encoded[i].swap(tmp); + tmp.claim_append(encoded[shard_id_t(i)]); + encoded[shard_id_t(i)].swap(tmp); } - const vector &mapping = lrc.get_chunk_mapping(); + const vector &mapping = 
lrc.get_chunk_mapping(); char c = 'A'; for (unsigned int i = 0; i < lrc.get_data_chunk_count(); i++) { - int j = mapping[i]; + shard_id_t j = mapping[i]; string s(chunk_size, c); encoded[j].clear(); encoded[j].append(s); c++; } - EXPECT_EQ(0, lrc.encode_chunks(want_to_encode, &encoded)); + shard_id_map in(lrc.get_chunk_count()); + shard_id_map out(lrc.get_chunk_count()); + for (auto&& [shard, list] : encoded) { + auto bp = list.begin().get_current_ptr(); + if (shard < lrc.get_data_chunk_count()) in[shard] = bp; + else out[shard] = bp; + } + EXPECT_EQ(0, lrc.encode_chunks(in, out)); { - map chunks; - chunks[4] = encoded[4]; - chunks[5] = encoded[5]; - chunks[6] = encoded[6]; - set want_to_read; - want_to_read.insert(7); - set available_chunks; - available_chunks.insert(4); - available_chunks.insert(5); - available_chunks.insert(6); - set minimum; + shard_id_map chunks(lrc.get_chunk_count()); + chunks[shard_id_t(4)] = encoded[shard_id_t(4)]; + chunks[shard_id_t(5)] = encoded[shard_id_t(5)]; + chunks[shard_id_t(6)] = encoded[shard_id_t(6)]; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(7)); + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(4)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(6)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); // only need three chunks from the second local layer EXPECT_EQ(3U, minimum.size()); - EXPECT_EQ(1U, minimum.count(4)); - EXPECT_EQ(1U, minimum.count(5)); - EXPECT_EQ(1U, minimum.count(6)); - map decoded; + EXPECT_EQ(1U, minimum.count(shard_id_t(4))); + EXPECT_EQ(1U, minimum.count(shard_id_t(5))); + EXPECT_EQ(1U, minimum.count(shard_id_t(6))); + shard_id_map decoded(lrc.get_chunk_count()); EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded)); string s(chunk_size, 'D'); - EXPECT_EQ(s, string(decoded[7].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(7)].c_str(), chunk_size)); } { - set want_to_read; - want_to_read.insert(2); - map chunks; - chunks[1] = encoded[1]; - chunks[3] = encoded[3]; - chunks[5] = encoded[5]; - chunks[6] = encoded[6]; - chunks[7] = encoded[7]; - set available_chunks; - available_chunks.insert(1); - available_chunks.insert(3); - available_chunks.insert(5); - available_chunks.insert(6); - available_chunks.insert(7); - set minimum; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(2)); + shard_id_map chunks(lrc.get_chunk_count()); + chunks[shard_id_t(1)] = encoded[shard_id_t(1)]; + chunks[shard_id_t(3)] = encoded[shard_id_t(3)]; + chunks[shard_id_t(5)] = encoded[shard_id_t(5)]; + chunks[shard_id_t(6)] = encoded[shard_id_t(6)]; + chunks[shard_id_t(7)] = encoded[shard_id_t(7)]; + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(6)); + available_chunks.insert(shard_id_t(7)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(5U, minimum.size()); EXPECT_EQ(available_chunks, minimum); - map decoded; + shard_id_map decoded(lrc.get_chunk_count()); EXPECT_EQ(0, lrc._decode(want_to_read, encoded, &decoded)); string s(chunk_size, 'A'); - EXPECT_EQ(s, string(decoded[2].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(2)].c_str(), chunk_size)); } { - set want_to_read; - want_to_read.insert(3); - want_to_read.insert(6); - want_to_read.insert(7); - set available_chunks; - 
available_chunks.insert(0); - available_chunks.insert(1); - available_chunks.insert(2); - // available_chunks.insert(3); - available_chunks.insert(4); - available_chunks.insert(5); - // available_chunks.insert(6); - // available_chunks.insert(7); - encoded.erase(3); - encoded.erase(6); - set minimum; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(3)); + want_to_read.insert(shard_id_t(6)); + want_to_read.insert(shard_id_t(7)); + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(2)); + // available_chunks.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(4)); + available_chunks.insert(shard_id_t(5)); + // available_chunks.insert(shard_id_t(6)); + // available_chunks.insert(shard_id_t(7)); + encoded.erase(shard_id_t(3)); + encoded.erase(shard_id_t(6)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(4U, minimum.size()); // only need two chunks from the first local layer - EXPECT_EQ(1U, minimum.count(0)); - EXPECT_EQ(1U, minimum.count(2)); + EXPECT_EQ(1U, minimum.count(shard_id_t(0))); + EXPECT_EQ(1U, minimum.count(shard_id_t(2))); // the above chunks will rebuild chunk 3 and the global layer only needs // three more chunks to reach the required amount of chunks (4) to recover // the last two - EXPECT_EQ(1U, minimum.count(1)); - EXPECT_EQ(1U, minimum.count(2)); - EXPECT_EQ(1U, minimum.count(5)); + EXPECT_EQ(1U, minimum.count(shard_id_t(1))); + EXPECT_EQ(1U, minimum.count(shard_id_t(2))); + EXPECT_EQ(1U, minimum.count(shard_id_t(5))); - map decoded; + shard_id_map decoded(lrc.get_chunk_count()); EXPECT_EQ(0, lrc._decode(want_to_read, encoded, &decoded)); { string s(chunk_size, 'B'); - EXPECT_EQ(s, string(decoded[3].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(3)].c_str(), chunk_size)); } { string s(chunk_size, 'C'); - EXPECT_EQ(s, string(decoded[6].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(6)].c_str(), chunk_size)); } { string s(chunk_size, 'D'); - EXPECT_EQ(s, string(decoded[7].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(7)].c_str(), chunk_size)); } } } @@ -753,165 +760,172 @@ TEST(ErasureCodeLrc, encode_decode_2) unsigned int chunk_size = g_conf().get_val("osd_pool_erasure_code_stripe_unit"); unsigned int stripe_width = lrc.get_data_chunk_count() * chunk_size; EXPECT_EQ(chunk_size, lrc.get_chunk_size(stripe_width)); - set want_to_encode; - map encoded; + shard_id_set want_to_encode; + shard_id_map encoded(lrc.get_chunk_count()); for (unsigned int i = 0; i < lrc.get_chunk_count(); ++i) { - want_to_encode.insert(i); + want_to_encode.insert(shard_id_t(i)); bufferptr ptr(buffer::create_page_aligned(chunk_size)); bufferlist tmp; tmp.push_back(ptr); - tmp.claim_append(encoded[i]); - encoded[i].swap(tmp); + tmp.claim_append(encoded[shard_id_t(i)]); + encoded[shard_id_t(i)].swap(tmp); } - const vector &mapping = lrc.get_chunk_mapping(); + const vector &mapping = lrc.get_chunk_mapping(); char c = 'A'; for (unsigned int i = 0; i < lrc.get_data_chunk_count(); i++) { - int j = mapping[i]; + shard_id_t j = mapping[i]; string s(chunk_size, c); encoded[j].clear(); encoded[j].append(s); c++; } - EXPECT_EQ(0, lrc.encode_chunks(want_to_encode, &encoded)); + shard_id_map in(lrc.get_chunk_count()); + shard_id_map out(lrc.get_chunk_count()); + for (auto&& [shard, list] : encoded) { + auto bp = list.begin().get_current_ptr(); + if (shard < 
lrc.get_data_chunk_count()) in[shard] = bp; + else out[shard] = bp; + } + EXPECT_EQ(0, lrc.encode_chunks(in, out)); { - set want_to_read; - want_to_read.insert(0); - map chunks; - chunks[1] = encoded[1]; - chunks[3] = encoded[3]; - chunks[4] = encoded[4]; - chunks[5] = encoded[5]; - chunks[6] = encoded[6]; - chunks[7] = encoded[7]; - set available_chunks; - available_chunks.insert(1); - available_chunks.insert(3); - available_chunks.insert(4); - available_chunks.insert(5); - available_chunks.insert(6); - available_chunks.insert(7); - set minimum; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(0)); + shard_id_map chunks(lrc.get_chunk_count()); + chunks[shard_id_t(1)] = encoded[shard_id_t(1)]; + chunks[shard_id_t(3)] = encoded[shard_id_t(3)]; + chunks[shard_id_t(4)] = encoded[shard_id_t(4)]; + chunks[shard_id_t(5)] = encoded[shard_id_t(5)]; + chunks[shard_id_t(6)] = encoded[shard_id_t(6)]; + chunks[shard_id_t(7)] = encoded[shard_id_t(7)]; + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(4)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(6)); + available_chunks.insert(shard_id_t(7)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(4U, minimum.size()); - EXPECT_EQ(1U, minimum.count(1)); - EXPECT_EQ(1U, minimum.count(4)); - EXPECT_EQ(1U, minimum.count(5)); - EXPECT_EQ(1U, minimum.count(6)); + EXPECT_EQ(1U, minimum.count(shard_id_t(1))); + EXPECT_EQ(1U, minimum.count(shard_id_t(4))); + EXPECT_EQ(1U, minimum.count(shard_id_t(5))); + EXPECT_EQ(1U, minimum.count(shard_id_t(6))); - map decoded; + shard_id_map decoded(lrc.get_chunk_count()); EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded)); string s(chunk_size, 'A'); - EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(0)].c_str(), chunk_size)); } { - set want_to_read; + shard_id_set want_to_read; for (unsigned int i = 0; i < lrc.get_chunk_count(); i++) - want_to_read.insert(i); - map chunks; - chunks[1] = encoded[1]; - chunks[3] = encoded[3]; - chunks[5] = encoded[5]; - chunks[6] = encoded[6]; - chunks[7] = encoded[7]; - set available_chunks; - available_chunks.insert(1); - available_chunks.insert(3); - available_chunks.insert(5); - available_chunks.insert(6); - available_chunks.insert(7); - set minimum; + want_to_read.insert(shard_id_t(i)); + shard_id_map chunks(lrc.get_chunk_count()); + chunks[shard_id_t(1)] = encoded[shard_id_t(1)]; + chunks[shard_id_t(3)] = encoded[shard_id_t(3)]; + chunks[shard_id_t(5)] = encoded[shard_id_t(5)]; + chunks[shard_id_t(6)] = encoded[shard_id_t(6)]; + chunks[shard_id_t(7)] = encoded[shard_id_t(7)]; + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(6)); + available_chunks.insert(shard_id_t(7)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(5U, minimum.size()); - EXPECT_EQ(1U, minimum.count(1)); - EXPECT_EQ(1U, minimum.count(3)); - EXPECT_EQ(1U, minimum.count(5)); - EXPECT_EQ(1U, minimum.count(6)); - EXPECT_EQ(1U, minimum.count(7)); + EXPECT_EQ(1U, minimum.count(shard_id_t(1))); + EXPECT_EQ(1U, minimum.count(shard_id_t(3))); + EXPECT_EQ(1U, minimum.count(shard_id_t(5))); + EXPECT_EQ(1U, minimum.count(shard_id_t(6))); + EXPECT_EQ(1U, 
minimum.count(shard_id_t(7))); - map decoded; + shard_id_map decoded(lrc.get_chunk_count()); EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded)); { string s(chunk_size, 'A'); - EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(0)].c_str(), chunk_size)); } { string s(chunk_size, 'B'); - EXPECT_EQ(s, string(decoded[1].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(1)].c_str(), chunk_size)); } { string s(chunk_size, 'C'); - EXPECT_EQ(s, string(decoded[4].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(4)].c_str(), chunk_size)); } { string s(chunk_size, 'D'); - EXPECT_EQ(s, string(decoded[5].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(5)].c_str(), chunk_size)); } } { - set want_to_read; + shard_id_set want_to_read; for (unsigned int i = 0; i < lrc.get_chunk_count(); i++) - want_to_read.insert(i); - map chunks; - chunks[1] = encoded[1]; - chunks[3] = encoded[3]; - chunks[5] = encoded[5]; - chunks[6] = encoded[6]; - chunks[7] = encoded[7]; - set available_chunks; - available_chunks.insert(1); - available_chunks.insert(3); - available_chunks.insert(5); - available_chunks.insert(6); - available_chunks.insert(7); - set minimum; + want_to_read.insert(shard_id_t(i)); + shard_id_map chunks(lrc.get_chunk_count()); + chunks[shard_id_t(1)] = encoded[shard_id_t(1)]; + chunks[shard_id_t(3)] = encoded[shard_id_t(3)]; + chunks[shard_id_t(5)] = encoded[shard_id_t(5)]; + chunks[shard_id_t(6)] = encoded[shard_id_t(6)]; + chunks[shard_id_t(7)] = encoded[shard_id_t(7)]; + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(6)); + available_chunks.insert(shard_id_t(7)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(5U, minimum.size()); - EXPECT_EQ(1U, minimum.count(1)); - EXPECT_EQ(1U, minimum.count(3)); - EXPECT_EQ(1U, minimum.count(5)); - EXPECT_EQ(1U, minimum.count(6)); - EXPECT_EQ(1U, minimum.count(7)); + EXPECT_EQ(1U, minimum.count(shard_id_t(1))); + EXPECT_EQ(1U, minimum.count(shard_id_t(3))); + EXPECT_EQ(1U, minimum.count(shard_id_t(5))); + EXPECT_EQ(1U, minimum.count(shard_id_t(6))); + EXPECT_EQ(1U, minimum.count(shard_id_t(7))); - map decoded; + shard_id_map decoded(lrc.get_chunk_count()); EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded)); { string s(chunk_size, 'A'); - EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(0)].c_str(), chunk_size)); } { string s(chunk_size, 'B'); - EXPECT_EQ(s, string(decoded[1].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(1)].c_str(), chunk_size)); } { string s(chunk_size, 'C'); - EXPECT_EQ(s, string(decoded[4].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(4)].c_str(), chunk_size)); } { string s(chunk_size, 'D'); - EXPECT_EQ(s, string(decoded[5].c_str(), chunk_size)); + EXPECT_EQ(s, string(decoded[shard_id_t(5)].c_str(), chunk_size)); } } { - set want_to_read; - want_to_read.insert(6); - map chunks; - chunks[0] = encoded[0]; - chunks[1] = encoded[1]; - chunks[3] = encoded[3]; - chunks[5] = encoded[5]; - chunks[7] = encoded[7]; - set available_chunks; - available_chunks.insert(0); - available_chunks.insert(1); - available_chunks.insert(3); - available_chunks.insert(5); - available_chunks.insert(7); - set minimum; + shard_id_set want_to_read; + want_to_read.insert(shard_id_t(6)); 
+ shard_id_map chunks(lrc.get_chunk_count()); + chunks[shard_id_t(0)] = encoded[shard_id_t(0)]; + chunks[shard_id_t(1)] = encoded[shard_id_t(1)]; + chunks[shard_id_t(3)] = encoded[shard_id_t(3)]; + chunks[shard_id_t(5)] = encoded[shard_id_t(5)]; + chunks[shard_id_t(7)] = encoded[shard_id_t(7)]; + shard_id_set available_chunks; + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(3)); + available_chunks.insert(shard_id_t(5)); + available_chunks.insert(shard_id_t(7)); + shard_id_set minimum; EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum)); EXPECT_EQ(available_chunks, minimum); - map decoded; + shard_id_map decoded(lrc.get_chunk_count()); EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded)); } } diff --git a/src/test/erasure-code/TestErasureCodePlugins.cc b/src/test/erasure-code/TestErasureCodePlugins.cc index dac02af88eb..0941a07cf5f 100644 --- a/src/test/erasure-code/TestErasureCodePlugins.cc +++ b/src/test/erasure-code/TestErasureCodePlugins.cc @@ -5,8 +5,6 @@ */ #include #include -#include "arch/probe.h" -#include "arch/intel.h" #include "erasure-code/ErasureCodePlugin.h" #include "global/global_context.h" #include "common/config_proxy.h" @@ -88,33 +86,32 @@ TEST_P(PluginTest,Initialize) TEST_P(PluginTest,PartialRead) { initialize(); - set want_to_encode; - for (unsigned int i = 0 ; i < get_k_plus_m(); i++) { + shard_id_set want_to_encode; + for (shard_id_t i; i < get_k_plus_m(); ++i) { want_to_encode.insert(i); } - // Test erasure code is systematic and that the data - // order is described by get_chunk_mapping(). + // Test erasure code is systematic and that the data order is described by + // get_chunk_mapping(). // - // Create a buffer and encode it. Compare the - // encoded shards of data with the equivalent - // range of the buffer. + // Create a buffer and encode it. Compare the encoded shards of data with the + // equivalent range of the buffer. // - // If there are no differences the plugin should - // report that it supports PARTIAL_READ_OPTIMIZATION + // If there are no differences the plugin should report that it supports + // PARTIAL_READ_OPTIMIZATION bufferlist bl; for (unsigned int i = 0; i < get_k(); i++) { generate_chunk(bl); } - map encoded; + shard_id_map encoded(get_k_plus_m()); erasure_code->encode(want_to_encode, bl, &encoded); - std::vector chunk_mapping = erasure_code->get_chunk_mapping(); + std::vector chunk_mapping = erasure_code->get_chunk_mapping(); bool different = false; - for (unsigned int i = 0; i < get_k_plus_m(); i++) { + for (shard_id_t i; i < get_k_plus_m(); ++i) { EXPECT_EQ(chunk_size, encoded[i].length()); - unsigned int index = (chunk_mapping.size() > i) ? chunk_mapping[i] : i; + shard_id_t index = (chunk_mapping.size() > i) ? 
chunk_mapping[int(i)] : i; if (i < get_k()) { bufferlist expects; - expects.substr_of(bl, i * chunk_size, chunk_size); + expects.substr_of(bl, int(i) * chunk_size, chunk_size); if (expects != encoded[index]) { different = true; } @@ -135,29 +132,24 @@ TEST_P(PluginTest,PartialRead) TEST_P(PluginTest,PartialWrite) { initialize(); - set want_to_encode; - for (unsigned int i = 0 ; i < get_k_plus_m(); i++) { + shard_id_set want_to_encode; + for (shard_id_t i; i < get_k_plus_m(); ++i) { want_to_encode.insert(i); } // Test erasure code can perform partial writes // - // Create buffer 1 that consists of 3 randomly - // generated chunks for each shard + // Create buffer 1 that consists of 3 randomly generated chunks for each shard // - // Create buffer 2 that has a different middle - // chunk for each shard + // Create buffer 2 that has a different middle chunk for each shard // - // Create buffer 3 that just has the 1 different - // middle chunk for each shard + // Create buffer 3 that just has the 1 different middle chunk for each shard // - // encoded the 3 buffers. Check if the first and - // last chunk of encoded shard buffer 1 and 2 are - // the same. Check if the midle chunk of encoded - // shard buffer 2 is the same as encoded shard - // buffer 3. + // encoded the 3 buffers. Check if the first and last chunk of encoded shard + // buffer 1 and 2 are the same. Check if the midle chunk of encoded shard + // buffer 2 is the same as encoded shard buffer 3. // - // If there are no differences the plugin should - // report that it supports PARTIAL_WRITE_OPTIMIZATION + // If there are no differences the plugin should report that it supports + // PARTIAL_WRITE_OPTIMIZATION bufferlist bl1; bufferlist bl2; bufferlist bl3; @@ -178,14 +170,14 @@ TEST_P(PluginTest,PartialWrite) bl2.append(b3); bl3.append(c2); } - map encoded1; + shard_id_map encoded1(get_k_plus_m()); erasure_code->encode(want_to_encode, bl1, &encoded1); - map encoded2; + shard_id_map encoded2(get_k_plus_m()); erasure_code->encode(want_to_encode, bl2, &encoded2); - map encoded3; + shard_id_map encoded3(get_k_plus_m()); erasure_code->encode(want_to_encode, bl3, &encoded3); bool different = false; - for (unsigned int i = 0; i < get_k_plus_m(); i++) { + for (shard_id_t i; i < get_k_plus_m(); ++i) { EXPECT_EQ(chunk_size*3, encoded1[i].length()); EXPECT_EQ(chunk_size*3, encoded2[i].length()); EXPECT_EQ(chunk_size, encoded3[i].length()); @@ -227,8 +219,8 @@ TEST_P(PluginTest,PartialWrite) TEST_P(PluginTest,ZeroInZeroOut) { initialize(); - set want_to_encode; - for (unsigned int i = 0 ; i < get_k_plus_m(); i++) { + shard_id_set want_to_encode; + for (shard_id_t i; i < get_k_plus_m(); ++i) { want_to_encode.insert(i); } // Test erasure code generates zeros for coding parity if data chunks are zeros @@ -242,12 +234,12 @@ TEST_P(PluginTest,ZeroInZeroOut) for (unsigned int i = 0; i < get_k(); i++) { generate_chunk(bl, 0); } - map encoded; + shard_id_map encoded(get_k_plus_m()); erasure_code->encode(want_to_encode, bl, &encoded); bool different = false; bufferlist expects; generate_chunk(expects, 0); - for (unsigned int i = 0; i < get_k_plus_m(); i++) { + for (shard_id_t i; i < get_k_plus_m(); ++i) { EXPECT_EQ(chunk_size, encoded[i].length()); if (expects != encoded[i]) { different = true; @@ -274,22 +266,22 @@ TEST_P(PluginTest,ParityDelta_SingleDeltaSingleParity) // 3. Test that EncodeDelta generates the expected delta when given the // original data chunk and the new data chunk. // 4. Do a second full write with the new chunk. - // 5. 
Test that ApplyDelta correctly applies the delta to the original parity chunk - // and returns the same new parity chunk as the second full write. + // 5. Test that ApplyDelta correctly applies the delta to the original parity + // chunk and returns the same new parity chunk as the second full write. initialize(); if (!(erasure_code->get_supported_optimizations() & ErasureCodeInterface::FLAG_EC_PLUGIN_PARITY_DELTA_OPTIMIZATION)) { GTEST_SKIP() << "Plugin does not support parity delta optimization"; } - set want_to_encode; - for (unsigned int i = 0 ; i < get_k_plus_m(); i++) { + shard_id_set want_to_encode; + for (shard_id_t i ; i < get_k_plus_m(); ++i) { want_to_encode.insert(i); } bufferlist old_bl; - for (unsigned int i = 0; i < get_k(); i++) { + for (unsigned int i = 0; i < get_k(); ++i) { generate_chunk(old_bl); } - map old_encoded; + shard_id_map old_encoded(get_k_plus_m()); erasure_code->encode(want_to_encode, old_bl, &old_encoded); bufferlist new_chunk_bl; @@ -298,10 +290,10 @@ TEST_P(PluginTest,ParityDelta_SingleDeltaSingleParity) random_device rand; mt19937 gen(rand()); uniform_int_distribution<> chunk_range(0, get_k()-1); - unsigned int random_chunk = chunk_range(gen); + shard_id_t random_chunk(chunk_range(gen)); ceph::bufferptr old_data = buffer::create_aligned(chunk_size, 4096); - old_bl.begin(random_chunk * chunk_size).copy(chunk_size, old_data.c_str()); + old_bl.begin(int(random_chunk) * chunk_size).copy(chunk_size, old_data.c_str()); ceph::bufferptr new_data = new_chunk_bl.front(); ceph::bufferptr delta = buffer::create_aligned(chunk_size, 4096); ceph::bufferptr expected_delta = buffer::create_aligned(chunk_size, 4096); @@ -321,17 +313,17 @@ TEST_P(PluginTest,ParityDelta_SingleDeltaSingleParity) EXPECT_EQ(delta_matches, true); uniform_int_distribution<> parity_range(get_k(), get_k_plus_m()-1); - unsigned int random_parity = parity_range(gen); + shard_id_t random_parity(parity_range(gen)); ceph::bufferptr old_parity = buffer::create_aligned(chunk_size, 4096); old_encoded[random_parity].begin(0).copy(chunk_size, old_parity.c_str()); - map new_encoded; + shard_id_map new_encoded(get_k_plus_m()); bufferlist new_bl; for (auto i = old_encoded.begin(); i != old_encoded.end(); i++) { if ((unsigned int)i->first >= get_k()) { continue; } - if ((unsigned int)i->first == random_chunk) { + if (i->first == random_chunk) { new_bl.append(new_data); } else { @@ -343,12 +335,12 @@ TEST_P(PluginTest,ParityDelta_SingleDeltaSingleParity) ceph::bufferptr expected_parity = buffer::create_aligned(chunk_size, 4096); new_encoded[random_parity].begin().copy_deep(chunk_size, expected_parity); - map in_map; + shard_id_map in_map(get_k_plus_m()); in_map[random_chunk] = delta; in_map[random_parity] = old_parity; - map out_map; + shard_id_map out_map(get_k_plus_m()); out_map[random_parity] = old_parity; - erasure_code->apply_delta((const map)in_map, out_map); + erasure_code->apply_delta(in_map, out_map); bool parity_matches = true; for (int i = 0; i < chunk_size; i++) { @@ -377,8 +369,8 @@ TEST_P(PluginTest,ParityDelta_MultipleDeltaMultipleParity) ErasureCodeInterface::FLAG_EC_PLUGIN_PARITY_DELTA_OPTIMIZATION)) { GTEST_SKIP() << "Plugin does not support parity delta optimization"; } - set want_to_encode; - for (unsigned int i = 0 ; i < get_k_plus_m(); i++) { + shard_id_set want_to_encode; + for (shard_id_t i ; i < get_k_plus_m(); ++i) { want_to_encode.insert(i); } @@ -386,14 +378,14 @@ TEST_P(PluginTest,ParityDelta_MultipleDeltaMultipleParity) for (unsigned int i = 0; i < get_k(); i++) { 
generate_chunk(old_bl); } - map old_encoded; + shard_id_map old_encoded(get_k_plus_m()); erasure_code->encode(want_to_encode, old_bl, &old_encoded); bufferlist new_bl; for (unsigned int i = 0; i < get_k(); i++) { generate_chunk(new_bl); } - map new_encoded; + shard_id_map new_encoded(get_k_plus_m()); erasure_code->encode(want_to_encode, new_bl, &new_encoded); ceph::bufferptr old_data = buffer::create_aligned(chunk_size*get_k(), 4096); @@ -418,25 +410,25 @@ TEST_P(PluginTest,ParityDelta_MultipleDeltaMultipleParity) } EXPECT_EQ(delta_matches, true); - map in_map; - map out_map; - for (unsigned int i = 0; i < get_k(); i++) { + shard_id_map in_map(get_k_plus_m()); + shard_id_map out_map(get_k_plus_m()); + for (shard_id_t i; i < get_k(); ++i) { ceph::bufferptr tmp = buffer::create_aligned(chunk_size, 4096); - delta.copy_out(chunk_size * i, chunk_size, tmp.c_str()); + delta.copy_out(chunk_size * int(i), chunk_size, tmp.c_str()); in_map[i] = tmp; } - for (unsigned int i = get_k(); i < get_k_plus_m(); i++) { + for (shard_id_t i(get_k()); i < get_k_plus_m(); ++i) { ceph::bufferptr tmp = buffer::create_aligned(chunk_size, 4096); old_encoded[i].begin().copy(chunk_size, tmp.c_str()); in_map[i] = tmp; out_map[i] = tmp; } - erasure_code->apply_delta((const map)in_map, out_map); + erasure_code->apply_delta(in_map, out_map); bool parity_matches = true; - for (unsigned int i = get_k(); i < get_k_plus_m(); i++) { + for (shard_id_t i(get_k()); i < get_k_plus_m(); ++i) { for (int j = 0; j < chunk_size; j++) { if (out_map[i].c_str()[j] != new_encoded[i].c_str()[j]) { parity_matches = false; @@ -577,21 +569,22 @@ INSTANTIATE_TEST_SUITE_P( "plugin=jerasure technique=liber8tion k=4 m=2 packetsize=32", "plugin=jerasure technique=liber8tion k=5 m=2 packetsize=32", "plugin=jerasure technique=liber8tion k=6 m=2 packetsize=32", - "plugin=clay k=2 m=1", - "plugin=clay k=3 m=1", - "plugin=clay k=4 m=1", - "plugin=clay k=5 m=1", - "plugin=clay k=6 m=1", - "plugin=clay k=2 m=2", - "plugin=clay k=3 m=2", - "plugin=clay k=4 m=2", - "plugin=clay k=5 m=2", - "plugin=clay k=6 m=2", - "plugin=clay k=2 m=3", - "plugin=clay k=3 m=3", - "plugin=clay k=4 m=3", - "plugin=clay k=5 m=3", - "plugin=clay k=6 m=3", + // Disabling clay for now. Needs more testing with optimized EC. 
+ // "plugin=clay k=2 m=1", + // "plugin=clay k=3 m=1", + // "plugin=clay k=4 m=1", + // "plugin=clay k=5 m=1", + // "plugin=clay k=6 m=1", + // "plugin=clay k=2 m=2", + // "plugin=clay k=3 m=2", + // "plugin=clay k=4 m=2", + // "plugin=clay k=5 m=2", + // "plugin=clay k=6 m=2", + // "plugin=clay k=2 m=3", + // "plugin=clay k=3 m=3", + // "plugin=clay k=4 m=3", + // "plugin=clay k=5 m=3", + // "plugin=clay k=6 m=3", "plugin=shec technique=single k=2 m=1 c=1", "plugin=shec technique=single k=3 m=1 c=1", "plugin=shec technique=single k=4 m=1 c=1", diff --git a/src/test/erasure-code/TestErasureCodeShec.cc b/src/test/erasure-code/TestErasureCodeShec.cc index 6b901dc6f30..e111352e17a 100644 --- a/src/test/erasure-code/TestErasureCodeShec.cc +++ b/src/test/erasure-code/TestErasureCodeShec.cc @@ -969,15 +969,15 @@ TEST(ErasureCodeShec, minimum_to_decode_8) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_set available_chunks; + shard_id_set minimum_chunks; for (int i = 0; i < 8; ++i) { - want_to_decode.insert(i); + want_to_decode.insert(shard_id_t(i)); } for (int i = 0; i < 5; ++i) { - available_chunks.insert(i); + available_chunks.insert(shard_id_t(i)); } int r = shec->_minimum_to_decode(want_to_decode, available_chunks, @@ -1005,15 +1005,15 @@ TEST(ErasureCodeShec, minimum_to_decode_9) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_set available_chunks; + shard_id_set minimum_chunks; for (int i = 0; i < 4; ++i) { - want_to_decode.insert(i); + want_to_decode.insert(shard_id_t(i)); } for (int i = 0; i < 8; ++i) { - available_chunks.insert(i); + available_chunks.insert(shard_id_t(i)); } int r = shec->_minimum_to_decode(want_to_decode, available_chunks, @@ -1041,15 +1041,15 @@ TEST(ErasureCodeShec, minimum_to_decode_10) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_set available_chunks; + shard_id_set minimum_chunks; for (int i = 0; i < 7; ++i) { - want_to_decode.insert(i); + want_to_decode.insert(shard_id_t(i)); } for (int i = 4; i < 7; ++i) { - available_chunks.insert(i); + available_chunks.insert(shard_id_t(i)); } int r = shec->_minimum_to_decode(want_to_decode, available_chunks, @@ -1077,15 +1077,15 @@ TEST(ErasureCodeShec, minimum_to_decode_11) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_set available_chunks; + shard_id_set minimum_chunks; for (int i = 0; i < 5; ++i) { - want_to_decode.insert(i); + want_to_decode.insert(shard_id_t(i)); } for (int i = 4; i < 7; ++i) { - available_chunks.insert(i); + available_chunks.insert(shard_id_t(i)); } int r = shec->_minimum_to_decode(want_to_decode, available_chunks, @@ -1113,13 +1113,13 @@ TEST(ErasureCodeShec, minimum_to_decode_12) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; + shard_id_set want_to_decode; + shard_id_set available_chunks; //minimum_chunks is NULL for (int i = 0; i < 7; ++i) { - want_to_decode.insert(i); - available_chunks.insert(i); + want_to_decode.insert(shard_id_t(i)); + available_chunks.insert(shard_id_t(i)); } int r = shec->_minimum_to_decode(want_to_decode, available_chunks, NULL); @@ -1146,18 +1146,18 @@ TEST(ErasureCodeShec, 
minimum_to_decode_13) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; - set minimum_chunks, minimum; + shard_id_set want_to_decode; + shard_id_set available_chunks; + shard_id_set minimum_chunks, minimum; for (int i = 0; i < 7; ++i) { - want_to_decode.insert(i); - available_chunks.insert(i); + want_to_decode.insert(shard_id_t(i)); + available_chunks.insert(shard_id_t(i)); } shec->_minimum_to_decode(want_to_decode, available_chunks, &minimum_chunks); minimum = minimum_chunks; //normal value for (int i = 100; i < 120; ++i) { - minimum_chunks.insert(i); //insert extra data + minimum_chunks.insert(shard_id_t(i)); //insert extra data } int r = shec->_minimum_to_decode(want_to_decode, available_chunks, @@ -1187,14 +1187,14 @@ TEST(ErasureCodeShec, minimum_to_decode2_1) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_set available_chunks; + shard_id_set minimum_chunks; - want_to_decode.insert(0); - available_chunks.insert(0); - available_chunks.insert(1); - available_chunks.insert(2); + want_to_decode.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(2)); int r = shec->_minimum_to_decode(want_to_decode, available_chunks, &minimum_chunks); @@ -1223,16 +1223,16 @@ TEST(ErasureCodeShec, minimum_to_decode2_3) shec->init(*profile, &cerr); //minimum_to_decode - set want_to_decode; - set available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_set available_chunks; + shard_id_set minimum_chunks; - want_to_decode.insert(0); - want_to_decode.insert(2); - available_chunks.insert(0); - available_chunks.insert(1); - available_chunks.insert(2); - available_chunks.insert(3); + want_to_decode.insert(shard_id_t(0)); + want_to_decode.insert(shard_id_t(2)); + available_chunks.insert(shard_id_t(0)); + available_chunks.insert(shard_id_t(1)); + available_chunks.insert(shard_id_t(2)); + available_chunks.insert(shard_id_t(3)); pthread_t tid; g_flag = 0; @@ -1272,13 +1272,13 @@ TEST(ErasureCodeShec, minimum_to_decode_with_cost_1) shec->init(*profile, &cerr); //minimum_to_decode_with_cost - set want_to_decode; - map available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_map available_chunks(shec->get_chunk_count()); + shard_id_set minimum_chunks; for (int i = 0; i < 7; ++i) { - want_to_decode.insert(i); - available_chunks.insert(make_pair(i, i)); + want_to_decode.insert(shard_id_t(i)); + available_chunks.insert(shard_id_t(i), i); } int r = shec->minimum_to_decode_with_cost(want_to_decode, available_chunks, @@ -1308,16 +1308,16 @@ TEST(ErasureCodeShec, minimum_to_decode_with_cost_2_3) shec->init(*profile, &cerr); //minimum_to_decode_with_cost - set want_to_decode; - map available_chunks; - set minimum_chunks; + shard_id_set want_to_decode; + shard_id_map available_chunks(shec->get_chunk_count()); + shard_id_set minimum_chunks; - want_to_decode.insert(0); - want_to_decode.insert(2); - available_chunks[0] = 0; - available_chunks[1] = 1; - available_chunks[2] = 2; - available_chunks[3] = 3; + want_to_decode.insert(shard_id_t(0)); + want_to_decode.insert(shard_id_t(2)); + available_chunks[shard_id_t(0)] = 0; + available_chunks[shard_id_t(1)] = 1; + available_chunks[shard_id_t(2)] = 2; + available_chunks[shard_id_t(3)] = 3; pthread_t tid; g_flag = 0; @@ -1340,6 +1340,7 @@ TEST(ErasureCodeShec, minimum_to_decode_with_cost_2_3) 
delete profile; } +IGNORE_DEPRECATED TEST(ErasureCodeShec, encode_1) { //init @@ -2821,3 +2822,4 @@ void* thread5(void* pParam) return NULL; } +END_IGNORE_DEPRECATED \ No newline at end of file diff --git a/src/test/erasure-code/TestErasureCodeShec_all.cc b/src/test/erasure-code/TestErasureCodeShec_all.cc index 401b8affc71..c0abbe8d244 100644 --- a/src/test/erasure-code/TestErasureCodeShec_all.cc +++ b/src/test/erasure-code/TestErasureCodeShec_all.cc @@ -50,8 +50,8 @@ struct Recover_d { int k; int m; int c; - set want; - set avail; + shard_id_set want; + shard_id_set avail; }; struct std::vector cannot_recover; @@ -59,6 +59,7 @@ class ParameterTest : public ::testing::TestWithParam { }; +IGNORE_DEPRECATED TEST_P(ParameterTest, parameter_all) { int result; @@ -99,7 +100,7 @@ TEST_P(ParameterTest, parameter_all) //minimum_to_decode //want_to_decode will be a combination that chooses 1~c from k+m - set want_to_decode, available_chunks, minimum_chunks; + shard_id_set want_to_decode, available_chunks, minimum_chunks; int array_want_to_decode[shec->get_chunk_count()]; struct Recover_d comb; @@ -115,12 +116,12 @@ TEST_P(ParameterTest, parameter_all) do { for (unsigned int i = 0; i < shec->get_chunk_count(); i++) { - available_chunks.insert(i); + available_chunks.insert(shard_id_t(i)); } for (unsigned int i = 0; i < shec->get_chunk_count(); i++) { if (array_want_to_decode[i]) { - want_to_decode.insert(i); - available_chunks.erase(i); + want_to_decode.insert(shard_id_t(i)); + available_chunks.erase(shard_id_t(i)); } } @@ -152,14 +153,14 @@ TEST_P(ParameterTest, parameter_all) } //minimum_to_decode_with_cost - set want_to_decode_with_cost, minimum_chunks_with_cost; - map available_chunks_with_cost; + shard_id_set want_to_decode_with_cost, minimum_chunks_with_cost; + shard_id_map available_chunks_with_cost(shec->get_chunk_count()); for (unsigned int i = 0; i < 1; i++) { - want_to_decode_with_cost.insert(i); + want_to_decode_with_cost.insert(shard_id_t(i)); } for (unsigned int i = 0; i < shec->get_chunk_count(); i++) { - available_chunks_with_cost[i] = i; + available_chunks_with_cost[shard_id_t(i)] = i; } result = shec->minimum_to_decode_with_cost( @@ -261,6 +262,7 @@ TEST_P(ParameterTest, parameter_all) delete profile; delete crush; } +END_IGNORE_DEPRECATED INSTANTIATE_TEST_SUITE_P(Test, ParameterTest, ::testing::ValuesIn(param)); diff --git a/src/test/erasure-code/TestErasureCodeShec_arguments.cc b/src/test/erasure-code/TestErasureCodeShec_arguments.cc index 075c6383eed..0c3b73f9e8e 100644 --- a/src/test/erasure-code/TestErasureCodeShec_arguments.cc +++ b/src/test/erasure-code/TestErasureCodeShec_arguments.cc @@ -39,7 +39,7 @@ unsigned int count_num = 0; unsigned int unexpected_count = 0; unsigned int value_count = 0; -map,set > > shec_table; +map> shec_table; constexpr int getint(std::initializer_list is) { int a = 0; @@ -50,8 +50,8 @@ constexpr int getint(std::initializer_list is) { } void create_table_shec432() { - set table_key,vec_avails; - set > table_value; + shard_id_set table_key, vec_avails; + set table_value; for (int want_count = 0; want_count < 7; ++want_count) { for (unsigned want = 1; want < (1<<7); ++want) { @@ -63,7 +63,7 @@ void create_table_shec432() { { for (int i = 0; i < 7; ++i) { if (want & (1 << i)) { - table_key.insert(i); + table_key.insert(shard_id_t(i)); } } } @@ -110,7 +110,7 @@ void create_table_shec432() { vec_avails.clear(); for (int j = 0; j < 7; ++j) { if (vec[i] & (1 << j)) { - vec_avails.insert(j); + vec_avails.insert(shard_id_t(j)); } } 
table_value.insert(vec_avails); @@ -120,17 +120,17 @@ void create_table_shec432() { } } -bool search_table_shec432(set want_to_read, set available_chunks) { - set > tmp; - set settmp; +bool search_table_shec432(shard_id_set want_to_read, shard_id_set available_chunks) { + set tmp; + shard_id_set settmp; bool found; tmp = shec_table.find(want_to_read)->second; - for (set >::iterator itr = tmp.begin();itr != tmp.end(); ++itr) { + for (set::iterator itr = tmp.begin();itr != tmp.end(); ++itr) { found = true; value_count = 0; settmp = *itr; - for (set::iterator setitr = settmp.begin();setitr != settmp.end(); ++setitr) { + for (shard_id_set::const_iterator setitr = settmp.begin();setitr != settmp.end(); ++setitr) { if (!available_chunks.count(*setitr)) { found = false; } @@ -143,6 +143,7 @@ bool search_table_shec432(set want_to_read, set available_chunks) { return false; } +IGNORE_DEPRECATED TEST(ParameterTest, combination_all) { const unsigned int kObjectSize = 128; @@ -345,6 +346,7 @@ TEST(ParameterTest, combination_all) delete shec; delete profile; } +END_IGNORE_DEPRECATED int main(int argc, char **argv) { diff --git a/src/test/erasure-code/TestErasureCodeShec_thread.cc b/src/test/erasure-code/TestErasureCodeShec_thread.cc index c8d7bbb1e9a..56bd2f801ad 100644 --- a/src/test/erasure-code/TestErasureCodeShec_thread.cc +++ b/src/test/erasure-code/TestErasureCodeShec_thread.cc @@ -86,6 +86,7 @@ TEST(ErasureCodeShec, thread) pthread_join(tid5, NULL); } +IGNORE_DEPRECATED void* thread1(void* pParam) { TestParam* param = static_cast(pParam); @@ -217,3 +218,4 @@ void* thread1(void* pParam) return NULL; } +END_IGNORE_DEPRECATED diff --git a/src/test/erasure-code/ceph_erasure_code_benchmark.cc b/src/test/erasure-code/ceph_erasure_code_benchmark.cc index 2849542a177..cc718e167c3 100644 --- a/src/test/erasure-code/ceph_erasure_code_benchmark.cc +++ b/src/test/erasure-code/ceph_erasure_code_benchmark.cc @@ -35,7 +35,6 @@ #include "erasure-code/ErasureCode.h" #include "ceph_erasure_code_benchmark.h" -using std::endl; using std::cerr; using std::cout; using std::map; @@ -111,7 +110,7 @@ int ErasureCodeBench::setup(int argc, char** argv) { std::vector strs; boost::split(strs, *i, boost::is_any_of("=")); if (strs.size() != 2) { - cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl; + cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << std::endl; } else { profile[strs[0]] = strs[1]; } @@ -136,14 +135,14 @@ int ErasureCodeBench::setup(int argc, char** argv) { m = stoi(profile["m"]); } catch (const std::logic_error& e) { cout << "Invalid k and/or m: k=" << profile["k"] << ", m=" << profile["m"] - << " (" << e.what() << ")" << endl; + << " (" << e.what() << ")" << std::endl; return -EINVAL; } if (k <= 0) { - cout << "parameter k is " << k << ". But k needs to be > 0." << endl; + cout << "parameter k is " << k << ". But k needs to be > 0." << std::endl; return -EINVAL; } else if ( m < 0 ) { - cout << "parameter m is " << m << ". But m needs to be >= 0." << endl; + cout << "parameter m is " << m << ". But m needs to be >= 0." 
<< std::endl; return -EINVAL; } @@ -171,33 +170,33 @@ int ErasureCodeBench::encode() g_conf().get_val("erasure_code_dir"), profile, &erasure_code, &messages); if (code) { - cerr << messages.str() << endl; + cerr << messages.str() << std::endl; return code; } bufferlist in; in.append(string(in_size, 'X')); in.rebuild_aligned(ErasureCode::SIMD_ALIGN); - set want_to_encode; - for (int i = 0; i < k + m; i++) { + shard_id_set want_to_encode; + for (shard_id_t i; i < k + m; ++i) { want_to_encode.insert(i); } utime_t begin_time = ceph_clock_now(); for (int i = 0; i < max_iterations; i++) { - std::map encoded; + shard_id_map encoded(erasure_code->get_chunk_count()); code = erasure_code->encode(want_to_encode, in, &encoded); if (code) return code; } utime_t end_time = ceph_clock_now(); - cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl; + cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << std::endl; return 0; } -static void display_chunks(const map &chunks, +static void display_chunks(const shard_id_map &chunks, unsigned int chunk_count) { cout << "chunks "; - for (unsigned int chunk = 0; chunk < chunk_count; chunk++) { + for (shard_id_t chunk; chunk < chunk_count; ++chunk) { if (chunks.count(chunk) == 0) { cout << "(" << chunk << ")"; } else { @@ -205,12 +204,12 @@ static void display_chunks(const map &chunks, } cout << " "; } - cout << "(X) is an erased chunk" << endl; + cout << "(X) is an erased chunk" << std::endl; } -int ErasureCodeBench::decode_erasures(const map &all_chunks, - const map &chunks, - unsigned i, +int ErasureCodeBench::decode_erasures(const shard_id_map &all_chunks, + const shard_id_map &chunks, + shard_id_t shard, unsigned want_erasures, ErasureCodeInterfaceRef erasure_code) { @@ -219,37 +218,37 @@ int ErasureCodeBench::decode_erasures(const map &all_chunks, if (want_erasures == 0) { if (verbose) display_chunks(chunks, erasure_code->get_chunk_count()); - set want_to_read; - for (unsigned int chunk = 0; chunk < erasure_code->get_chunk_count(); chunk++) + shard_id_set want_to_read; + for (shard_id_t chunk; chunk < erasure_code->get_chunk_count(); ++chunk) if (chunks.count(chunk) == 0) want_to_read.insert(chunk); - map decoded; + shard_id_map decoded(erasure_code->get_chunk_count()); code = erasure_code->decode(want_to_read, chunks, &decoded, 0); if (code) return code; - for (set::iterator chunk = want_to_read.begin(); + for (shard_id_set::const_iterator chunk = want_to_read.begin(); chunk != want_to_read.end(); ++chunk) { if (all_chunks.find(*chunk)->second.length() != decoded[*chunk].length()) { cerr << "chunk " << *chunk << " length=" << all_chunks.find(*chunk)->second.length() - << " decoded with length=" << decoded[*chunk].length() << endl; + << " decoded with length=" << decoded[*chunk].length() << std::endl; return -1; } bufferlist tmp = all_chunks.find(*chunk)->second; if (!tmp.contents_equal(decoded[*chunk])) { cerr << "chunk " << *chunk - << " content and recovered content are different" << endl; + << " content and recovered content are different" << std::endl; return -1; } } return 0; } - for (; i < erasure_code->get_chunk_count(); i++) { - map one_less = chunks; - one_less.erase(i); - code = decode_erasures(all_chunks, one_less, i + 1, want_erasures - 1, erasure_code); + for (; shard < erasure_code->get_chunk_count(); ++shard) { + shard_id_map one_less = chunks; + one_less.erase(shard); + code = decode_erasures(all_chunks, one_less, shard + 1, want_erasures - 1, erasure_code); if (code) return code; } 
@@ -266,7 +265,7 @@ int ErasureCodeBench::decode() g_conf().get_val("erasure_code_dir"), profile, &erasure_code, &messages); if (code) { - cerr << messages.str() << endl; + cerr << messages.str() << std::endl; return code; } @@ -274,54 +273,54 @@ int ErasureCodeBench::decode() in.append(string(in_size, 'X')); in.rebuild_aligned(ErasureCode::SIMD_ALIGN); - set want_to_encode; - for (int i = 0; i < k + m; i++) { + shard_id_set want_to_encode; + for (shard_id_t i; i < k + m; ++i) { want_to_encode.insert(i); } - map encoded; + shard_id_map encoded(erasure_code->get_chunk_count()); code = erasure_code->encode(want_to_encode, in, &encoded); if (code) return code; - set want_to_read = want_to_encode; + shard_id_set want_to_read = want_to_encode; if (erased.size() > 0) { for (vector::const_iterator i = erased.begin(); i != erased.end(); ++i) - encoded.erase(*i); + encoded.erase(shard_id_t(*i)); display_chunks(encoded, erasure_code->get_chunk_count()); } utime_t begin_time = ceph_clock_now(); for (int i = 0; i < max_iterations; i++) { if (exhaustive_erasures) { - code = decode_erasures(encoded, encoded, 0, erasures, erasure_code); + code = decode_erasures(encoded, encoded, shard_id_t(0), erasures, erasure_code); if (code) return code; } else if (erased.size() > 0) { - map decoded; + shard_id_map decoded(erasure_code->get_chunk_count()); code = erasure_code->decode(want_to_read, encoded, &decoded, 0); if (code) return code; } else { - map chunks = encoded; + shard_id_map chunks = encoded; for (int j = 0; j < erasures; j++) { int erasure; do { erasure = rand() % ( k + m ); - } while(chunks.count(erasure) == 0); - chunks.erase(erasure); + } while(chunks.count(shard_id_t(erasure)) == 0); + chunks.erase(shard_id_t(erasure)); } - map decoded; + shard_id_map decoded(erasure_code->get_chunk_count()); code = erasure_code->decode(want_to_read, chunks, &decoded, 0); if (code) return code; } } utime_t end_time = ceph_clock_now(); - cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl; + cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << std::endl; return 0; } @@ -333,7 +332,7 @@ int main(int argc, char** argv) { return err; return ecbench.run(); } catch(po::error &e) { - cerr << e.what() << endl; + cerr << e.what() << std::endl; return 1; } } diff --git a/src/test/erasure-code/ceph_erasure_code_benchmark.h b/src/test/erasure-code/ceph_erasure_code_benchmark.h index 59149a74c16..86543e3aa98 100644 --- a/src/test/erasure-code/ceph_erasure_code_benchmark.h +++ b/src/test/erasure-code/ceph_erasure_code_benchmark.h @@ -50,9 +50,9 @@ class ErasureCodeBench { public: int setup(int argc, char** argv); int run(); - int decode_erasures(const std::map &all_chunks, - const std::map &chunks, - unsigned i, + int decode_erasures(const shard_id_map &all_chunks, + const shard_id_map &chunks, + shard_id_t shard, unsigned want_erasures, ErasureCodeInterfaceRef erasure_code); int decode(); diff --git a/src/test/erasure-code/ceph_erasure_code_non_regression.cc b/src/test/erasure-code/ceph_erasure_code_non_regression.cc index de82c53591f..b399d7dd401 100644 --- a/src/test/erasure-code/ceph_erasure_code_non_regression.cc +++ b/src/test/erasure-code/ceph_erasure_code_non_regression.cc @@ -51,10 +51,10 @@ public: int run_create(); int run_check(); int decode_erasures(ErasureCodeInterfaceRef erasure_code, - set erasures, - map chunks); + shard_id_set erasures, + shard_id_map chunks); string content_path(); - string chunk_path(unsigned int chunk); + string 
chunk_path(shard_id_t chunk); }; int ErasureCodeNonRegression::setup(int argc, char** argv) { @@ -110,7 +110,7 @@ int ErasureCodeNonRegression::setup(int argc, char** argv) { create = vm.count("create") > 0; if (!check && !create) { - cerr << "must specifify either --check, or --create" << endl; + cerr << "must specifify either --check, or --create" << std::endl; return 1; } @@ -128,7 +128,7 @@ int ErasureCodeNonRegression::setup(int argc, char** argv) { std::vector strs; boost::split(strs, *i, boost::is_any_of("=")); if (strs.size() != 2) { - cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl; + cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << std::endl; } else { profile[strs[0]] = strs[1]; } @@ -158,12 +158,12 @@ int ErasureCodeNonRegression::run_create() g_conf().get_val("erasure_code_dir"), profile, &erasure_code, &messages); if (code) { - cerr << messages.str() << endl; + cerr << messages.str() << std::endl; return code; } if (::mkdir(directory.c_str(), 0755)) { - cerr << "mkdir(" << directory << "): " << cpp_strerror(errno) << endl; + cerr << "mkdir(" << directory << "): " << cpp_strerror(errno) << std::endl; return 1; } unsigned payload_chunk_size = 37; @@ -177,15 +177,15 @@ int ErasureCodeNonRegression::run_create() in.splice(stripe_width, in.length() - stripe_width); if (in.write_file(content_path().c_str())) return 1; - set want_to_encode; - for (unsigned int i = 0; i < erasure_code->get_chunk_count(); i++) { + shard_id_set want_to_encode; + for (shard_id_t i; i < erasure_code->get_chunk_count(); ++i) { want_to_encode.insert(i); } - map encoded; + shard_id_map encoded(erasure_code->get_chunk_count()); code = erasure_code->encode(want_to_encode, in, &encoded); if (code) return code; - for (map::iterator chunk = encoded.begin(); + for (shard_id_map::iterator chunk = encoded.begin(); chunk != encoded.end(); ++chunk) { if (chunk->second.write_file(chunk_path(chunk->first).c_str())) @@ -195,26 +195,26 @@ int ErasureCodeNonRegression::run_create() } int ErasureCodeNonRegression::decode_erasures(ErasureCodeInterfaceRef erasure_code, - set erasures, - map chunks) + shard_id_set erasures, + shard_id_map chunks) { - map available; - for (map::iterator chunk = chunks.begin(); + shard_id_map available(erasure_code->get_chunk_count()); + for (shard_id_map::iterator chunk = chunks.begin(); chunk != chunks.end(); ++chunk) { if (erasures.count(chunk->first) == 0) available[chunk->first] = chunk->second; } - map decoded; + shard_id_map decoded(erasure_code->get_chunk_count()); int code = erasure_code->decode(erasures, available, &decoded, available.begin()->second.length()); if (code) return code; - for (set::iterator erasure = erasures.begin(); + for (shard_id_set::const_iterator erasure = erasures.begin(); erasure != erasures.end(); ++erasure) { if (!chunks[*erasure].contents_equal(decoded[*erasure])) { - cerr << "chunk " << *erasure << " incorrectly recovered" << endl; + cerr << "chunk " << *erasure << " incorrectly recovered" << std::endl; return 1; } } @@ -230,45 +230,45 @@ int ErasureCodeNonRegression::run_check() g_conf().get_val("erasure_code_dir"), profile, &erasure_code, &messages); if (code) { - cerr << messages.str() << endl; + cerr << messages.str() << std::endl; return code; } string errors; bufferlist in; if (in.read_file(content_path().c_str(), &errors)) { - cerr << errors << endl; + cerr << errors << std::endl; return 1; } - set want_to_encode; - for (unsigned int i = 0; i < 
erasure_code->get_chunk_count(); i++) { + shard_id_set want_to_encode; + for (shard_id_t i; i < erasure_code->get_chunk_count(); ++i) { want_to_encode.insert(i); } - map encoded; + shard_id_map encoded(erasure_code->get_chunk_count()); code = erasure_code->encode(want_to_encode, in, &encoded); if (code) return code; - for (map::iterator chunk = encoded.begin(); + for (shard_id_map::iterator chunk = encoded.begin(); chunk != encoded.end(); ++chunk) { bufferlist existing; if (existing.read_file(chunk_path(chunk->first).c_str(), &errors)) { - cerr << errors << endl; + cerr << errors << std::endl; return 1; } bufferlist &old = chunk->second; if (existing.length() != old.length() || memcmp(existing.c_str(), old.c_str(), old.length())) { - cerr << "chunk " << chunk->first << " encodes differently" << endl; + cerr << "chunk " << chunk->first << " encodes differently" << std::endl; return 1; } } // erasing a single chunk is likely to use a specific code path in every plugin - set erasures; + shard_id_set erasures; erasures.clear(); - erasures.insert(0); + erasures.insert(shard_id_t()); code = decode_erasures(erasure_code, erasures, encoded); if (code) return code; @@ -276,8 +276,8 @@ int ErasureCodeNonRegression::run_check() if (erasure_code->get_chunk_count() - erasure_code->get_data_chunk_count() > 1) { // erasing two chunks is likely to be the general case erasures.clear(); - erasures.insert(0); - erasures.insert(erasure_code->get_chunk_count() - 1); + erasures.insert(shard_id_t()); + erasures.insert(shard_id_t(erasure_code->get_chunk_count() - 1)); code = decode_erasures(erasure_code, erasures, encoded); if (code) return code; @@ -293,7 +293,7 @@ string ErasureCodeNonRegression::content_path() return path.str(); } -string ErasureCodeNonRegression::chunk_path(unsigned int chunk) +string ErasureCodeNonRegression::chunk_path(shard_id_t chunk) { stringstream path; path << directory << "/" << chunk; diff --git a/src/tools/erasure-code/ceph-erasure-code-tool.cc b/src/tools/erasure-code/ceph-erasure-code-tool.cc index ef6c91902a6..9a5dc09100c 100644 --- a/src/tools/erasure-code/ceph-erasure-code-tool.cc +++ b/src/tools/erasure-code/ceph-erasure-code-tool.cc @@ -114,7 +114,7 @@ int do_test_plugin_exists(const std::vector &args) { std::lock_guard l{instance.lock}; int r = instance.load( args[0], g_conf().get_val("erasure_code_dir"), &plugin, &ss); - std::cerr << ss.str() << endl; + std::cerr << ss.str() << std::endl; return r; } @@ -271,8 +271,8 @@ int do_decode(const std::vector &args) { return 1; } auto chunk = static_cast(chunk_mapping.size()) > shard ? - chunk_mapping[shard] : shard; - want_to_read.insert(chunk); + chunk_mapping[shard] : shard_id_t(shard); + want_to_read.insert(static_cast(chunk)); } r = ECUtil::decode(*sinfo, ec_impl, want_to_read, encoded_data, &decoded_data); -- 2.39.5
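
For readers new to the in/out split exercised by the updated LRC and plugin tests above, the sketch below shows the call pattern a caller uses to drive encode_chunks(): data shards populate an "in" shard_id_map of bufferptr, pre-allocated parity buffers populate "out", and the plugin fills the parity in place. This is an illustrative sketch only, not code from this series: the helper name, the zero-filled payload, the 4096-byte alignment, and the exact headers are assumptions.

#include "erasure-code/ErasureCodeInterface.h"
#include "include/buffer.h"
// Headers above are indicative; shard_id_t / shard_id_map come from the
// common shard types used throughout this series.

// Illustrative sketch (not part of the patch): encode one stripe through the
// in/out interface, assuming `ec` was loaded via the usual plugin registry.
int encode_one_stripe(ErasureCodeInterfaceRef ec, unsigned chunk_size)
{
  const unsigned k = ec->get_data_chunk_count();
  const unsigned k_plus_m = ec->get_chunk_count();

  shard_id_map<ceph::bufferptr> in(k_plus_m);   // data shards to encode
  shard_id_map<ceph::bufferptr> out(k_plus_m);  // empty buffers for parity

  for (shard_id_t s; s < k_plus_m; ++s) {
    ceph::bufferptr bp = ceph::buffer::create_aligned(chunk_size, 4096);
    if (s < k) {
      bp.zero();    // stand-in for real client data
      in[s] = bp;
    } else {
      out[s] = bp;  // the plugin writes parity into these buffers
    }
  }

  // Parity is computed from `in` into the buffers held by `out`.
  return ec->encode_chunks(in, out);
}

As in the tests, the `s < k` split assumes the data shards occupy the low shard IDs; a code with a non-identity chunk mapping (LRC, for example) would consult get_chunk_mapping() before deciding which shards belong in `in` and which in `out`.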