We have changed the encode_chunks interface to take an in map and an out map.
The in map contains the data shards to be encoded. The out map contains the
empty buffers that the plugin will write the parity data to.
The old non-optimized EC path has been updated to call the new encode_chunks
interface from erasurecode.cc
The new EC optimizations path calls the encode_chunks interface from ECUtils.
Signed-off-by: Jamie Pryde <jamiepry@uk.ibm.com>
Signed-off-by: Alex Ainscow <aainscow@uk.ibm.com>
return 0;
}
-int ErasureCode::chunk_index(unsigned int i) const
+shard_id_t ErasureCode::chunk_index(raw_shard_id_t i) const
{
- return chunk_mapping.size() > i ? chunk_mapping[i] : i;
+ return chunk_mapping.size() > uint64_t(i) ? chunk_mapping[static_cast<int>(i)] : shard_id_t(int8_t(i));
}
+[[deprecated]]
+unsigned int ErasureCode::chunk_index(unsigned int i) const
+{
+ return static_cast<unsigned int>(chunk_mapping.size() > uint64_t(i) ? chunk_mapping[i] : shard_id_t(i));
+}
+
+[[deprecated]]
int ErasureCode::_minimum_to_decode(const set<int> &want_to_read,
const set<int> &available_chunks,
set<int> *minimum)
return 0;
}
+int ErasureCode::_minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available_chunks,
+ shard_id_set *minimum)
+{
+ if (available_chunks.includes(want_to_read)) {
+ *minimum = want_to_read;
+ } else {
+ unsigned int k = get_data_chunk_count();
+ if (available_chunks.size() < (unsigned)k)
+ return -EIO;
+ shard_id_set::const_iterator i;
+ unsigned j;
+ for (i = available_chunks.begin(), j = 0; j < (unsigned)k; ++i, j++)
+ minimum->insert(*i);
+ }
+ return 0;
+}
+
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCode::minimum_to_decode(const set<int> &want_to_read,
const set<int> &available_chunks,
map<int, vector<pair<int, int>>> *minimum)
}
return 0;
}
+END_IGNORE_DEPRECATED
+
+int ErasureCode::minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available_chunks,
+ shard_id_set &minimum_set,
+ shard_id_map<vector<pair<int, int>>> *minimum_sub_chunks)
+{
+ int r = _minimum_to_decode(want_to_read, available_chunks, &minimum_set);
+ if (minimum_sub_chunks == nullptr) return r;
+ if (r != 0) {
+ return r;
+ }
+ vector<pair<int, int>> default_subchunks;
+ default_subchunks.push_back(make_pair(0, get_sub_chunk_count()));
+ for (auto &&id : minimum_set) {
+ minimum_sub_chunks->emplace(id, default_subchunks);
+ }
+ return 0;
+}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCode::minimum_to_decode_with_cost(const set<int> &want_to_read,
const map<int, int> &available,
set<int> *minimum)
available_chunks.insert(i->first);
return _minimum_to_decode(want_to_read, available_chunks, minimum);
}
+END_IGNORE_DEPRECATED
+int ErasureCode::minimum_to_decode_with_cost(const shard_id_set &want_to_read,
+ const shard_id_map<int> &available,
+ shard_id_set *minimum)
+{
+ shard_id_set available_chunks;
+ for (shard_id_map<int>::const_iterator i = available.begin();
+ i != available.end();
+ ++i)
+ available_chunks.insert(i->first);
+ return _minimum_to_decode(want_to_read, available_chunks, minimum);
+}
+
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCode::encode_prepare(const bufferlist &raw,
map<int, bufferlist> &encoded) const
{
return 0;
}
+END_IGNORE_DEPRECATED
+
+int ErasureCode::encode_prepare(const bufferlist &raw,
+ shard_id_map<bufferlist> &encoded) const
+{
+ unsigned int k = get_data_chunk_count();
+ unsigned int m = get_chunk_count() - k;
+ unsigned blocksize = get_chunk_size(raw.length());
+ unsigned padded_chunks = k - raw.length() / blocksize;
+ bufferlist prepared = raw;
+
+ for (raw_shard_id_t i; i < k - padded_chunks; ++i) {
+ bufferlist &chunk = encoded[chunk_index(i)];
+ chunk.substr_of(prepared, (int)i * blocksize, blocksize);
+ chunk.rebuild_aligned_size_and_memory(blocksize, SIMD_ALIGN);
+ ceph_assert(chunk.is_contiguous());
+ }
+ if (padded_chunks) {
+ unsigned remainder = raw.length() - (k - padded_chunks) * blocksize;
+ bufferptr buf(buffer::create_aligned(blocksize, SIMD_ALIGN));
+
+ raw.begin((k - padded_chunks) * blocksize).copy(remainder, buf.c_str());
+ buf.zero(remainder, blocksize - remainder);
+ encoded[chunk_index(raw_shard_id_t(k - padded_chunks))].push_back(std::move(buf));
+
+ for (raw_shard_id_t i(k - padded_chunks + 1); i < k; ++i) {
+ bufferptr buf(buffer::create_aligned(blocksize, SIMD_ALIGN));
+ buf.zero();
+ encoded[chunk_index(i)].push_back(std::move(buf));
+ }
+ }
+ for (raw_shard_id_t i(k); i < k + m; ++i) {
+ bufferlist &chunk = encoded[chunk_index(i)];
+ chunk.push_back(buffer::create_aligned(blocksize, SIMD_ALIGN));
+ }
+
+ return 0;
+}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCode::encode(const set<int> &want_to_encode,
const bufferlist &in,
map<int, bufferlist> *encoded)
}
return 0;
}
+END_IGNORE_DEPRECATED
+
+int ErasureCode::encode(const shard_id_set &want_to_encode,
+ const bufferlist &in,
+ shard_id_map<bufferlist> *encoded)
+{
+ unsigned int k = get_data_chunk_count();
+ unsigned int m = get_chunk_count() - k;
+
+ if (!encoded || !encoded->empty()){
+ return -EINVAL;
+ }
+ int err = encode_prepare(in, *encoded);
+ if (err)
+ return err;
+
+ shard_id_map<bufferptr> in_shards(get_chunk_count());
+ shard_id_map<bufferptr> out_shards(get_chunk_count());
+
+ for (raw_shard_id_t raw_shard; raw_shard < get_chunk_count(); ++raw_shard) {
+ shard_id_t shard = chunk_index(raw_shard);
+ if (!encoded->contains(shard)) continue;
+
+ auto bp = encoded->at(shard).begin().get_current_ptr();
+ ceph_assert(bp.length() == encoded->at(shard).length());
+
+ if (raw_shard < k) in_shards[shard] = bp;
+ else out_shards[shard] = bp;
+ }
+
+ encode_chunks(in_shards, out_shards);
+ for (shard_id_t i; i < k + m; ++i) {
+ if (want_to_encode.count(i) == 0)
+ encoded->erase(i);}
+
+ return 0;
+}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCode::_decode(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded)
}
return decode_chunks(want_to_read, chunks, decoded);
}
+END_IGNORE_DEPRECATED
+int ErasureCode::_decode(const shard_id_set &want_to_read,
+ const shard_id_map<bufferlist> &chunks,
+ shard_id_map<bufferlist> *decoded)
+{
+ shard_id_set have;
+
+ if (!decoded || !decoded->empty()){
+ return -EINVAL;
+ }
+ if (!want_to_read.empty() && chunks.empty()) {
+ return -1;
+ }
+
+ for (auto &&[shard, _] : chunks) {
+ have.insert(shard);
+ }
+ if (have.includes(want_to_read)) {
+ for (auto &&shard : want_to_read) {
+ (*decoded)[shard] = chunks.at(shard);
+ }
+ return 0;
+ }
+ unsigned int k = get_data_chunk_count();
+ unsigned int m = get_chunk_count() - k;
+ unsigned blocksize = (*chunks.begin()).second.length();
+ shard_id_set erasures;
+ for (shard_id_t i; i < k + m; ++i) {
+ if (!chunks.contains(i)) {
+ bufferlist tmp;
+ bufferptr ptr(buffer::create_aligned(blocksize, SIMD_ALIGN));
+ tmp.push_back(ptr);
+ tmp.claim_append((*decoded)[i]);
+ (*decoded)[i].swap(tmp);
+ erasures.insert(i);
+ } else {
+ (*decoded)[i] = chunks.find(i)->second;
+ (*decoded)[i].rebuild_aligned(SIMD_ALIGN);
+ }
+ bufferlist &bl = (*decoded)[i];
+ if (bl.length() != bl.begin().get_current_ptr().length()) {
+ bl.rebuild();
+ }
+ }
+ shard_id_map<bufferptr> in(get_chunk_count());
+ shard_id_map<bufferptr> out(get_chunk_count());
+ for (auto&& [shard, list] : *decoded) {
+ auto bp = list.begin().get_current_ptr();
+ ceph_assert(bp.length() == list.length());
+ if (erasures.find(shard) == erasures.end()) in[shard] = bp;
+ else out[shard] = bp;
+ }
+ return decode_chunks(want_to_read, in, out);
+}
+
+[[deprecated]]
int ErasureCode::decode(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded, int chunk_size)
return _decode(want_to_read, chunks, decoded);
}
+int ErasureCode::decode(const shard_id_set &want_to_read,
+ const shard_id_map<bufferlist> &chunks,
+ shard_id_map<bufferlist> *decoded, int chunk_size)
+{
+ return _decode(want_to_read, chunks, decoded);
+}
+
int ErasureCode::parse(const ErasureCodeProfile &profile,
ostream *ss)
{
return to_mapping(profile, ss);
}
-const vector<int> &ErasureCode::get_chunk_mapping() const {
+const vector<shard_id_t> &ErasureCode::get_chunk_mapping() const {
return chunk_mapping;
}
vector<int> coding_chunk_mapping;
for(std::string::iterator it = mapping.begin(); it != mapping.end(); ++it) {
if (*it == 'D')
- chunk_mapping.push_back(position);
+ chunk_mapping.push_back(shard_id_t(position));
else
coding_chunk_mapping.push_back(position);
position++;
return 0;
}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCode::decode_concat(const set<int>& want_to_read,
const map<int, bufferlist> &chunks,
bufferlist *decoded)
return r;
}
+[[deprecated]]
int ErasureCode::decode_concat(const map<int, bufferlist> &chunks,
bufferlist *decoded)
{
}
return decode_concat(want_to_read, chunks, decoded);
}
+END_IGNORE_DEPRECATED
}
public:
static const unsigned SIMD_ALIGN;
- std::vector<int> chunk_mapping;
+ std::vector<shard_id_t> chunk_mapping;
ErasureCodeProfile _profile;
// for CRUSH rule
return 1;
}
+ [[deprecated]]
virtual int _minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available_chunks,
std::set<int> *minimum);
+ virtual int _minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available_chunks,
+ shard_id_set *minimum);
+
+ [[deprecated]]
int minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::map<int,
std::vector<std::pair<int,
int>>> *minimum) override;
+ int minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available,
+ shard_id_set &minimum_set,
+ mini_flat_map<shard_id_t, std::vector<std::pair<int, int>>> *minimum_sub_chunks) override;
+
+ [[deprecated]]
int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) override;
+ int minimum_to_decode_with_cost(const shard_id_set &want_to_read,
+ const mini_flat_map<shard_id_t, int> &available,
+ shard_id_set *minimum) override;
+
+ int encode_prepare(const bufferlist &raw,
+ mini_flat_map<shard_id_t, bufferlist> &encoded) const;
+
+ [[deprecated]]
int encode_prepare(const bufferlist &raw,
std::map<int, bufferlist> &encoded) const;
+ int encode(const shard_id_set &want_to_encode,
+ const bufferlist &in,
+ mini_flat_map<shard_id_t, bufferlist> *encoded) override;
+
+ [[deprecated]]
int encode(const std::set<int> &want_to_encode,
const bufferlist &in,
std::map<int, bufferlist> *encoded) override;
+ [[deprecated]]
int decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded,
int chunk_size) override;
+ int decode(const shard_id_set &want_to_read,
+ const mini_flat_map<shard_id_t, bufferlist> &chunks,
+ mini_flat_map<shard_id_t, bufferlist> *decoded, int chunk_size) override;
+
+ [[deprecated]]
virtual int _decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded);
- const std::vector<int> &get_chunk_mapping() const override;
+ virtual int _decode(const shard_id_set &want_to_read,
+ const mini_flat_map<shard_id_t, bufferlist> &chunks,
+ mini_flat_map<shard_id_t, bufferlist> *decoded);
+
+ const std::vector<shard_id_t> &get_chunk_mapping() const override;
int to_mapping(const ErasureCodeProfile &profile, std::ostream *ss);
const std::string &default_value,
std::ostream *ss);
+ [[deprecated]]
int decode_concat(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
bufferlist *decoded) override;
+ [[deprecated]]
int decode_concat(const std::map<int, bufferlist> &chunks,
bufferlist *decoded) override;
int parse(const ErasureCodeProfile &profile, std::ostream *ss);
private:
- int chunk_index(unsigned int i) const;
+ [[deprecated]]
+ unsigned int chunk_index(unsigned int i) const;
+ shard_id_t chunk_index(raw_shard_id_t i) const;
};
}
#include <memory>
#include <string>
#include "include/buffer_fwd.h"
+#include "osd/osd_types.h"
+
+#define IGNORE_DEPRECATED \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\" ") \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
+
+#define END_IGNORE_DEPRECATED \
+ _Pragma("clang pop") \
+ _Pragma("GCC pop")
+
class CrushWrapper;
* subchunk index offsets, count.
* @return **0** on success or a negative errno on error.
*/
+ virtual int minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available,
+ shard_id_set &minimum_set,
+ mini_flat_map<shard_id_t, std::vector<std::pair<int, int>>>
+ *minimum_sub_chunks) = 0;
+
+ // Interface for legacy EC.
+ [[deprecated]]
virtual int minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::map<int, std::vector<std::pair<int, int>>>
* @param [out] minimum chunk indexes to retrieve
* @return **0** on success or a negative errno on error.
*/
+ virtual int minimum_to_decode_with_cost(const shard_id_set &want_to_read,
+ const shard_id_map<int> &available,
+ shard_id_set *minimum) = 0;
+
+ [[deprecated]]
virtual int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) = 0;
virtual size_t get_minimum_granularity() = 0;
/**
+ * Note: The encode function is used for the older EC code path
+ * that is used when EC optimizations are turned off. EC optimizations
+ * are turned off for new pools by default.
+ *
* Encode the content of **in** and store the result in
* **encoded**. All buffers pointed to by **encoded** have the
* same size. The **encoded** map contains at least all chunk
* @param [out] encoded map chunk indexes to chunk data
* @return **0** on success or a negative errno on error.
*/
- virtual int encode(const std::set<int> &want_to_encode,
+ virtual int encode(const shard_id_set &want_to_encode,
const bufferlist &in,
- std::map<int, bufferlist> *encoded) = 0;
-
+ shard_id_map<bufferlist> *encoded) = 0;
+ [[deprecated]]
+ virtual int encode(const std::set<int> &want_to_encode,
+ const bufferlist &in,
+ std::map<int, bufferlist> *encoded) = 0;
+ [[deprecated]]
virtual int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, bufferlist> *encoded) = 0;
+ /**
+ * Note: The encode_chunks function is used by the older EC code path
+ * that is used when EC optimizations are turned off. It is also used
+ * when EC optimizations are turned on.
+ *
+ * Encode the content of **in** and store the result in
+ * **out**. All buffers pointed to by **in** and **out** have the
+ * same size.
+ *
+ * The data chunks to be encoded are provided in the in map, these buffers
+ * are considered to be immutable (neither the bufferptr or the contents
+ * of the buffer may be changed). Some of these bufferptrs may be a special
+ * bufferptr representing a buffer of zeros. There is no way to represent
+ * a buffer for a chunk that consists of a mixture of data and zeros,
+ * the caller is expected to make multiple calls to encode_chunks using smaller
+ * buffers if this optimzation is worthwhile. The bufferptrs are expected to
+ * have suitable alignment (page alignment) and are a single contiguous
+ * range of memory. The caller is likely to have a bufferlist per chunk
+ * and may either need to make multiple calls to encode_chunks or use
+ * rebuild_and_align to create a single contiguous buffer for each chunk.
+ *
+ * The coding parity chunk bufferptrs are allocated by the caller and
+ * populated in the out map. These bufferptrs are expected to be written to
+ * by the erasure code plugin. Again the bufferptrs are expected to have
+ * suitable alignment and are a single contiguous range of memory.
+ * The erasure code plugin may replace one or more of these bufferptrs
+ * with a special bufferptr representing a buffer of zeros.
+ *
+ * Returns 0 on success.
+ *
+ * @param [in] in map of data shards to be encoded
+ * @param [out] out map of empty buffers for parity to be written to
+ * @return **0** on success or a negative errno on error.
+ */
+ virtual int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) = 0;
+
/**
* Calculate the delta between the old_data and new_data buffers using xor,
* (or plugin-specific implementation) and returns the result in the
* @param [in] new_data second buffer to xor
* @param [out] delta buffer containing the delta of old_data and new_data
*/
- virtual void apply_delta(const std::map<int, bufferptr> &in,
- std::map <int, bufferptr> &out) = 0;
+ virtual void apply_delta(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) = 0;
/**
+ * N.B This function is not used when EC optimizations are
+ * turned on for the pool.
+ *
* Decode the **chunks** and store at least **want_to_read**
* chunks in **decoded**.
*
* @param [in] chunk_size chunk size
* @return **0** on success or a negative errno on error.
*/
+ virtual int decode(const shard_id_set &want_to_read,
+ const shard_id_map<bufferlist> &chunks,
+ shard_id_map<bufferlist> *decoded, int chunk_size) = 0;
+ [[deprecated]]
virtual int decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded, int chunk_size) = 0;
+ /**
+ * Decode the **in** map and store at least **want_to_read**
+ * shards in the **out** map.
+ *
+ * There must be enough shards in the **in** map( as returned by
+ * **minimum_to_decode** or **minimum_to_decode_with_cost** ) to
+ * perform a successful decoding of all shards listed in
+ * **want_to_read**.
+ *
+ * All buffers pointed to by **in** must have the same size.
+ * **out** must contain empty buffers that are the same size as the
+ * **in*** buffers.
+ *
+ * On success, the **out** map may contain more shards than
+ * required by **want_to_read** and they can safely be used by the
+ * caller.
+ *
+ * Returns 0 on success.
+ *
+ * @param [in] want_to_read shard indexes to be decoded
+ * @param [in] in map of available shard indexes to shard data
+ * @param [out] out map of shard indexes that nede to be decoded to empty buffers
+ * @return **0** on success or a negative errno on error.
+ */
+ virtual int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) = 0;
+
+ [[deprecated]]
virtual int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded) = 0;
*
* @return vector<int> list of indices of chunks to be remapped
*/
- virtual const std::vector<int> &get_chunk_mapping() const = 0;
+ virtual const std::vector<shard_id_t> &get_chunk_mapping() const = 0;
/**
* Decode the first **get_data_chunk_count()** **chunks** and
* will be concatenated into `decoded` in index order
* @return **0** on success or a negative errno on error.
*/
+ [[deprecated]]
virtual int decode_concat(const std::set<int>& want_to_read,
const std::map<int, bufferlist> &chunks,
bufferlist *decoded) = 0;
+ [[deprecated]]
virtual int decode_concat(const std::map<int, bufferlist> &chunks,
bufferlist *decoded) = 0;
#define LARGEST_VECTOR_WORDSIZE 16
#define talloc(type, num) (type *) malloc(sizeof(type)*(num))
+/* The new EC API work for Clay requires significant testing. We ignore all
+ * deprecated function use in this file until that refactor is done.
+ */
+IGNORE_DEPRECATED
+
using namespace std;
using namespace ceph;
static ostream& _prefix(std::ostream* _dout)
return mds.erasure_code->get_minimum_granularity();
}
+[[deprecated]]
int ErasureCodeClay::minimum_to_decode(const set<int> &want_to_read,
const set<int> &available,
map<int, vector<pair<int, int>>> *minimum)
}
}
+[[deprecated]]
int ErasureCodeClay::decode(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded, int chunk_size)
return res;
}
+#if 0 \
+/* This code was partially tested, so keeping code, but we need more
+ * refactoring and testing before it is ready for production.
+ */
+int ErasureCodeClay::encode_chunks(const std::map<int, bufferptr> &in,
+ std::map<int, bufferptr> &out)
+{
+ map<int, bufferlist> chunks;
+ set<int> parity_chunks;
+ unsigned int size = 0;
+ auto& nonconst_in = const_cast<std::map<int, bufferptr>&>(in);
+
+ for (auto &&[shard, ptr] : nonconst_in) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[shard].append(nonconst_in[shard]);
+ }
+
+ for (auto &&[shard, ptr] : out) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[shard+nu].append(out[shard]);
+ parity_chunks.insert(shard+nu);
+ }
+
+ for (int i = k; i < k + nu; i++) {
+ bufferptr buf(buffer::create_aligned(size, SIMD_ALIGN));
+ buf.zero();
+ chunks[i].push_back(std::move(buf));
+ }
+
+ int res = decode_layered(parity_chunks, &chunks);
+ for (int i = k ; i < k + nu; i++) {
+ // need to clean some of the intermediate chunks here!!
+ chunks[i].clear();
+ }
+ return res;
+}
+#endif
+
int ErasureCodeClay::decode_chunks(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded)
z = (z - z_vec[t-1-i]) / q;
}
}
+
+END_IGNORE_DEPRECATED
size_t get_minimum_granularity() override;
+ using ErasureCode::minimum_to_decode;
int minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::map<int, std::vector<std::pair<int, int>>> *minimum) override;
+ using ErasureCode::decode;
int decode(const std::set<int> &want_to_read,
const std::map<int, ceph::bufferlist> &chunks,
std::map<int, ceph::bufferlist> *decoded, int chunk_size) override;
+ [[deprecated]]
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::bufferlist> *encoded) override;
+ // Stub for new encode chunks interface. Can be deleted once new EC is
+ // supported for all plugins.
+ int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override
+ {
+ ceph_abort_msg("Not implemented for this plugin");
+ }
+
+ [[deprecated]]
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::bufferlist> &chunks,
std::map<int, ceph::bufferlist> *decoded) override;
+
+ // Stub for new encode chunks interface. Can be deleted once new EC is
+ // supported for all plugins.
+ virtual int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+ {
+ ceph_abort_msg("Not implemented for this plugin");
+ }
+
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
int is_repair(const std::set<int> &want_to_read,
return isa_decode(erasures, data, coding, blocksize);
}
+int ErasureCodeIsa::encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ char *chunks[k + m]; //TODO don't use variable length arrays
+ memset(chunks, 0, sizeof(char*) * (k + m));
+ uint64_t size = 0;
+
+ for (auto &&[shard, ptr] : in) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[static_cast<int>(shard)] = const_cast<char*>(ptr.c_str());
+ }
+
+ for (auto &&[shard, ptr] : out) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[static_cast<int>(shard)] = ptr.c_str();
+ }
+
+ char *zeros = nullptr;
+
+ for (shard_id_t i; i < k + m; ++i) {
+ if (in.contains(i) || out.contains(i)) continue;
+
+ if (zeros == nullptr) {
+ zeros = (char*)malloc(size);
+ memset(zeros, 0, size);
+ }
+
+ chunks[static_cast<int>(i)] = zeros;
+ }
+
+ isa_encode(&chunks[0], &chunks[k], size);
+
+ if (zeros != nullptr) free(zeros);
+
+ return 0;
+}
+
+int ErasureCodeIsa::decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ unsigned int size = 0;
+ shard_id_set erasures_set;
+ shard_id_set to_free;
+ erasures_set.insert_range(shard_id_t(0), k + m);
+ int erasures[k + m + 1];
+ int erasures_count = 0;
+ char *data[k];
+ char *coding[m];
+ memset(data, 0, sizeof(char*) * k);
+ memset(coding, 0, sizeof(char*) * m);
+
+ for (auto &&[shard, ptr] : in) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ if (shard < k) {
+ data[static_cast<int>(shard)] = const_cast<char*>(ptr.c_str());
+ }
+ else {
+ coding[static_cast<int>(shard) - k] = const_cast<char*>(ptr.c_str());
+ }
+ erasures_set.erase(shard);
+ }
+
+ for (auto &&[shard, ptr] : out) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ if (shard < k) {
+ data[static_cast<int>(shard)] = const_cast<char*>(ptr.c_str());
+ }
+ else {
+ coding[static_cast<int>(shard) - k] = const_cast<char*>(ptr.c_str());
+ }
+ }
+
+ for (int i = 0; i < k + m; i++) {
+ char **buf = i < k ? &data[i] : &coding[i - k];
+ if (*buf == nullptr) {
+ *buf = (char *)malloc(size);
+ to_free.insert(shard_id_t(i));
+ }
+ }
+
+ for (auto && shard : erasures_set) {
+ erasures[erasures_count++] = static_cast<int>(shard);
+ }
+
+
+ erasures[erasures_count] = -1;
+ ceph_assert(erasures_count > 0);
+ int r = isa_decode(erasures, data, coding, size);
+ for (auto & shard : to_free) {
+ int i = static_cast<int>(shard);
+ char **buf = i < k ? &data[i] : &coding[i - k];
+ free(*buf);
+ *buf = nullptr;
+ }
+ return r;
+}
+
// -----------------------------------------------------------------------------
void
unsigned int get_chunk_size(unsigned int stripe_width) const override;
+ [[deprecated]]
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::buffer::list> *encoded) override;
+ int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override;
+ [[deprecated]]
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
+ int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
int parse(ceph::ErasureCodeProfile &profile,
std::ostream *ss) override;
};
+static_assert(!std::is_abstract<ErasureCodeIsaDefault>());
#endif
}
}
+[[deprecated]]
int ErasureCodeJerasure::encode_chunks(const set<int> &want_to_encode,
map<int, bufferlist> *encoded)
{
return 0;
}
+int ErasureCodeJerasure::encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ char *chunks[k + m]; //TODO don't use variable length arrays
+ memset(chunks, 0, sizeof(char*) * (k + m));
+ uint64_t size = 0;
+
+ for (auto &&[shard, ptr] : in) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[static_cast<int>(shard)] = const_cast<char*>(ptr.c_str());
+ }
+
+ for (auto &&[shard, ptr] : out) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[static_cast<int>(shard)] = ptr.c_str();
+ }
+
+ char *zeros = nullptr;
+
+ for (shard_id_t i; i < k + m; ++i) {
+ if (in.contains(i) || out.contains(i)) continue;
+
+ if (zeros == nullptr) {
+ zeros = (char*)malloc(size);
+ memset(zeros, 0, size);
+ }
+
+ chunks[static_cast<int>(i)] = zeros;
+ }
+
+ jerasure_encode(&chunks[0], &chunks[k], size);
+
+ if (zeros != nullptr) free(zeros);
+
+ return 0;
+}
+
+[[deprecated]]
int ErasureCodeJerasure::decode_chunks(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded)
return jerasure_decode(erasures, data, coding, blocksize);
}
+int ErasureCodeJerasure::decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ unsigned int size = 0;
+ shard_id_set erasures_set;
+ shard_id_set to_free;
+ erasures_set.insert_range(shard_id_t(0), k + m);
+ int erasures[k + m + 1];
+ int erasures_count = 0;
+ char *data[k];
+ char *coding[m];
+ memset(data, 0, sizeof(char*) * k);
+ memset(coding, 0, sizeof(char*) * m);
+
+ for (auto &&[shard, ptr] : in) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ if (shard < k) {
+ data[static_cast<int>(shard)] = const_cast<char*>(ptr.c_str());
+ }
+ else {
+ coding[static_cast<int>(shard) - k] = const_cast<char*>(ptr.c_str());
+ }
+ erasures_set.erase(shard);
+ }
+
+ for (auto &&[shard, ptr] : out) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ if (shard < k) {
+ data[static_cast<int>(shard)] = const_cast<char*>(ptr.c_str());
+ }
+ else {
+ coding[static_cast<int>(shard) - k] = const_cast<char*>(ptr.c_str());
+ }
+ }
+
+ for (int i = 0; i < k + m; i++) {
+ char **buf = i < k ? &data[i] : &coding[i - k];
+ if (*buf == nullptr) {
+ *buf = (char *)malloc(size);
+ to_free.insert(shard_id_t(i));
+ }
+ }
+
+ for (auto && shard : erasures_set) {
+ erasures[erasures_count++] = static_cast<int>(shard);
+ }
+
+
+ erasures[erasures_count] = -1;
+ ceph_assert(erasures_count > 0);
+ int r = jerasure_decode(erasures, data, coding, size);
+ for (auto & shard : to_free) {
+ int i = static_cast<int>(shard);
+ char **buf = i < k ? &data[i] : &coding[i - k];
+ free(*buf);
+ *buf = nullptr;
+ }
+ return r;
+}
+
void ErasureCodeJerasure::encode_delta(const bufferptr &old_data,
const bufferptr &new_data,
bufferptr *delta_maybe_in_place)
unsigned int get_chunk_size(unsigned int stripe_width) const override;
+ [[deprecated]]
int encode_chunks(const std::set<int> &want_to_encode,
- std::map<int, ceph::buffer::list> *encoded) override;
+ std::map<int, ceph::buffer::list> *encoded) override;
+ int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override;
+ [[deprecated]]
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
+ int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override;
void encode_delta(const ceph::bufferptr &old_data,
const ceph::bufferptr &new_data,
layer.data.push_back(position);
if (*it == 'c')
layer.coding.push_back(position);
- if (*it == 'c' || *it == 'D')
- layer.chunks_as_set.insert(position);
+ if (*it == 'c' || *it == 'D') {
+ layer.chunks_as_set.insert(position);
+ layer.chunks_as_shard_set.insert(shard_id_t(position));
+ }
position++;
}
layer.chunks = layer.data;
return result;
}
+shard_id_set ErasureCodeLrc::get_erasures(const shard_id_set &want,
+ const shard_id_set &available) const
+{
+ return shard_id_set::difference(want, available);
+}
+
unsigned int ErasureCodeLrc::get_chunk_size(unsigned int stripe_width) const
{
return layers.front().erasure_code->get_chunk_size(stripe_width);
return layers.front().erasure_code->get_minimum_granularity();
}
-void p(const set<int> &s) { cerr << s; } // for gdb
+void p(const shard_id_set &s) { cerr << s; } // for gdb
+[[deprecated]]
int ErasureCodeLrc::_minimum_to_decode(const set<int> &want_to_read,
const set<int> &available_chunks,
set<int> *minimum)
return -EIO;
}
+int ErasureCodeLrc::_minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available_chunks,
+ shard_id_set *minimum)
+{
+ dout(20) << __func__ << " want_to_read " << want_to_read
+ << " available_chunks " << available_chunks << dendl;
+ {
+ shard_id_set erasures_total;
+ shard_id_set erasures_not_recovered;
+ shard_id_set erasures_want;
+ for (shard_id_t i; i < get_chunk_count(); ++i) {
+ if (available_chunks.count(i) == 0) {
+ erasures_total.insert(i);
+ erasures_not_recovered.insert(i);
+ if (want_to_read.count(i) != 0)
+ erasures_want.insert(i);
+ }
+ }
+
+ //
+ // Case 1:
+ //
+ // When no chunk is missing there is no need to read more than what
+ // is wanted.
+ //
+ if (erasures_want.empty()) {
+ *minimum = want_to_read;
+ dout(20) << __func__ << " minimum == want_to_read == "
+ << want_to_read << dendl;
+ return 0;
+ }
+
+ //
+ // Case 2:
+ //
+ // Try to recover erasures with as few chunks as possible.
+ //
+ for (vector<Layer>::reverse_iterator i = layers.rbegin();
+ i != layers.rend();
+ ++i) {
+ //
+ // If this layer has no chunk that we want, skip it.
+ //
+ shard_id_set layer_want;
+ layer_want = shard_id_set::intersection(want_to_read, i->chunks_as_shard_set);
+ if (layer_want.empty())
+ continue;
+ //
+ // Are some of the chunks we want missing ?
+ //
+ shard_id_set layer_erasures = shard_id_set::intersection(layer_want, erasures_want);
+
+ shard_id_set layer_minimum;
+ if (layer_erasures.empty()) {
+ //
+ // The chunks we want are available, this is the minimum we need
+ // to read.
+ //
+ layer_minimum = layer_want;
+ } else {
+ shard_id_set erasures = shard_id_set::intersection(i->chunks_as_shard_set, erasures_not_recovered);
+
+ if (erasures.size() > i->erasure_code->get_coding_chunk_count()) {
+ //
+ // There are too many erasures for this layer to recover: skip
+ // it and hope that an upper layer will be do better.
+ //
+ continue;
+ } else {
+ //
+ // Get all available chunks in that layer to recover the
+ // missing one(s).
+ //
+ layer_minimum = shard_id_set::difference(i->chunks_as_shard_set, erasures_not_recovered);
+ //
+ // Chunks recovered by this layer are removed from the list of
+ // erasures so that upper levels do not attempt to recover
+ // them.
+ //
+ for (shard_id_set::const_iterator j = erasures.begin();
+ j != erasures.end();
+ ++j) {
+ erasures_not_recovered.erase(*j);
+ erasures_want.erase(*j);
+ }
+ }
+ }
+ minimum->insert(layer_minimum);
+ }
+ if (erasures_want.empty()) {
+ minimum->insert(want_to_read);
+ for (shard_id_set::const_iterator i = erasures_total.begin();
+ i != erasures_total.end();
+ ++i) {
+ if (minimum->count(*i))
+ minimum->erase(*i);
+ }
+ dout(20) << __func__ << " minimum = " << *minimum << dendl;
+ return 0;
+ }
+ }
+
+ {
+ //
+ // Case 3:
+ //
+ // The previous strategy failed to recover from all erasures.
+ //
+ // Try to recover as many chunks as possible, even from layers
+ // that do not contain chunks that we want, in the hope that it
+ // will help the upper layers.
+ //
+ shard_id_set erasures_total;
+ for (shard_id_t i; i < get_chunk_count(); ++i) {
+ if (available_chunks.count(i) == 0)
+ erasures_total.insert(i);
+ }
+
+ for (vector<Layer>::reverse_iterator i = layers.rbegin();
+ i != layers.rend();
+ ++i) {
+ shard_id_set layer_erasures = shard_id_set::intersection(i->chunks_as_shard_set, erasures_total);
+
+ // If this layer has no erasure, skip it
+ //
+ if (layer_erasures.empty())
+ continue;
+
+ if (layer_erasures.size() > 0 &&
+ layer_erasures.size() <= i->erasure_code->get_coding_chunk_count()) {
+ //
+ // chunks recovered by this layer are removed from the list of
+ // erasures so that upper levels know they can rely on their
+ // availability
+ //
+ for (shard_id_set::const_iterator j = layer_erasures.begin();
+ j != layer_erasures.end();
+ ++j) {
+ erasures_total.erase(*j);
+ }
+ }
+ }
+ if (erasures_total.empty()) {
+ //
+ // Do not try to be smart about what chunks are necessary to
+ // recover, use all available chunks.
+ //
+ *minimum = available_chunks;
+ dout(20) << __func__ << " minimum == available_chunks == "
+ << available_chunks << dendl;
+ return 0;
+ }
+ }
+
+ derr << __func__ << " not enough chunks in " << available_chunks
+ << " to read " << want_to_read << dendl;
+ return -EIO;
+}
+
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCodeLrc::encode_chunks(const set<int> &want_to_encode,
map<int, bufferlist> *encoded)
{
}
return 0;
}
+END_IGNORE_DEPRECATED
+
+int ErasureCodeLrc::encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ unsigned int chunk_size = 0;
+ shard_id_set all_shards;
+ auto& nonconst_in = const_cast<shard_id_map<bufferptr>&>(in);
+
+ for (const auto& [shard, ptr] : in) {
+ all_shards.insert(shard);
+ if (chunk_size == 0) chunk_size = ptr.length();
+ else ceph_assert(chunk_size == ptr.length());
+ }
+
+ unsigned int top = layers.size();
+ shard_id_set out_shards;
+ for (const auto& [shard, ptr] : out) {
+ out_shards.insert(shard);
+ all_shards.insert(shard);
+ if (chunk_size == 0) chunk_size = ptr.length();
+ else ceph_assert(chunk_size == ptr.length());
+ }
+
+ for (vector<Layer>::reverse_iterator i = layers.rbegin(); i != layers.rend(); ++i) {
+ --top;
+ if (i->chunks_as_shard_set.includes(all_shards)) {
+ break;
+ }
+ }
+
+ for (unsigned int i = top; i < layers.size(); ++i) {
+ const Layer &layer = layers[i];
+ shard_id_map<bufferptr> layer_in(get_chunk_count());
+ shard_id_map<bufferptr> layer_out(get_chunk_count());
+ shard_id_t j;
+ for (const auto& c : layer.chunks) {
+ if (nonconst_in.contains(shard_id_t(c)))
+ layer_in[j] = nonconst_in[shard_id_t(c)];
+ if (out.contains(shard_id_t(c)))
+ layer_out[j] = out[shard_id_t(c)];
+ ++j;
+ }
+ int err = layer.erasure_code->encode_chunks(layer_in, layer_out);
+
+ if (err) {
+ derr << __func__ << " layer " << layer.chunks_map
+ << " failed with " << err << " trying to encode "
+ << dendl;
+ return err;
+ }
+ }
+ return 0;
+}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCodeLrc::decode_chunks(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded)
return 0;
}
}
+END_IGNORE_DEPRECATED
+
+int ErasureCodeLrc::decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ shard_id_set available_chunks;
+ shard_id_set erasures;
+ unsigned int chunk_size = 0;
+
+ for (const auto& [shard, ptr] : in) {
+ if (chunk_size == 0) chunk_size = ptr.length();
+ else ceph_assert(chunk_size == ptr.length());
+ available_chunks.insert(shard);
+ }
+
+ for (const auto& [shard, ptr] : out) {
+ if (chunk_size == 0) chunk_size = ptr.length();
+ else ceph_assert(chunk_size == ptr.length());
+ erasures.insert(shard);
+ }
+
+ shard_id_set want_to_read_erasures;
+
+ for (vector<Layer>::reverse_iterator layer = layers.rbegin();
+ layer != layers.rend();
+ ++layer) {
+ shard_id_set layer_erasures = shard_id_set::intersection(layer->chunks_as_shard_set, erasures);
+
+ if (layer_erasures.size() >
+ layer->erasure_code->get_coding_chunk_count()) {
+ // skip because there are too many erasures for this layer to recover
+ } else if(layer_erasures.size() == 0) {
+ // skip because all chunks are already available
+ } else {
+ shard_id_set layer_want_to_read;
+ shard_id_map<bufferptr> layer_in(get_chunk_count());
+ shard_id_map<bufferptr> layer_out(get_chunk_count());
+ shard_id_t j;
+ for (vector<int>::const_iterator c = layer->chunks.begin();
+ c != layer->chunks.end();
+ ++c)
+ {
+ shard_id_t cs(*c);
+ if (!erasures.contains(cs)) {
+ if (in.contains(cs)) layer_in[j] = in[cs];
+ else layer_in[j] = out[cs];
+ }
+ else {
+ layer_out[j] = out[cs];
+ }
+ ++j;
+ }
+ int err = layer->erasure_code->decode_chunks(layer_want_to_read, layer_in, layer_out);
+ if (err) {
+ derr << __func__ << " layer " << layer->chunks_map
+ << " failed with " << err << " trying to decode "
+ << layer_want_to_read << " with " << available_chunks << dendl;
+ return err;
+ }
+
+ for (vector<int>::const_iterator c = layer->chunks.begin();
+ c != layer->chunks.end();
+ ++c)
+ {
+ erasures.erase(shard_id_t(*c));
+ }
+ want_to_read_erasures = shard_id_set::intersection(erasures, want_to_read);
+ if (want_to_read_erasures.size() == 0)
+ break;
+ }
+ }
+
+ if (want_to_read_erasures.size() > 0) {
+ derr << __func__ << " want to read " << want_to_read
+ << " with available_chunks = " << available_chunks
+ << " end up being unable to read " << want_to_read_erasures << dendl;
+ return -EIO;
+ } else {
+ return 0;
+ }
+}
std::vector<int> data;
std::vector<int> coding;
std::vector<int> chunks;
+ shard_id_set chunks_as_shard_set;
std::set<int> chunks_as_set;
std::string chunks_map;
ceph::ErasureCodeProfile profile;
~ErasureCodeLrc() override {}
+ [[deprecated]]
std::set<int> get_erasures(const std::set<int> &need,
const std::set<int> &available) const;
+ shard_id_set get_erasures(const shard_id_set &need,
+ const shard_id_set &available) const;
+
+ [[deprecated]]
int _minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::set<int> *minimum) override;
+ int _minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available,
+ shard_id_set *minimum) override;
int create_rule(const std::string &name,
CrushWrapper &crush,
size_t get_minimum_granularity() override;
+ [[deprecated]]
int encode_chunks(const std::set<int> &want_to_encode,
- std::map<int, ceph::buffer::list> *encoded) override;
-
+ std::map<int, ceph::buffer::list> *encoded) override;
+ int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out);
+ [[deprecated]]
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
+ int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
int layers_sanity_checks(const std::string &description_string,
std::ostream *ss) const;
};
+static_assert(!std::is_abstract<ErasureCodeLrc>());
#endif
return padded_length / k;
}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCodeShec::_minimum_to_decode(const set<int> &want_to_read,
const set<int> &available_chunks,
set<int> *minimum_chunks)
return 0;
}
+END_IGNORE_DEPRECATED
+int ErasureCodeShec::_minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available_chunks,
+ shard_id_set *minimum_chunks)
+{
+ if (!minimum_chunks) return -EINVAL;
+
+ for (shard_id_set::const_iterator it = available_chunks.begin(); it != available_chunks.end(); ++it){
+ if (*it < 0 || k+m <= *it) return -EINVAL;
+ }
+
+ for (shard_id_set::const_iterator it = want_to_read.begin(); it != want_to_read.end(); ++it){
+ if (*it < 0 || k+m <= *it) return -EINVAL;
+ }
+
+ int want[k + m];
+ int avails[k + m];
+ int minimum[k + m];
+
+ memset(want, 0, sizeof(want));
+ memset(avails, 0, sizeof(avails));
+ memset(minimum, 0, sizeof(minimum));
+ (*minimum_chunks).clear();
+
+ for (shard_id_t shard : want_to_read) {
+ want[static_cast<int>(shard)] = 1;
+ }
+
+ for (shard_id_t shard : available_chunks) {
+ avails[static_cast<int>(shard)] = 1;
+ }
+
+ {
+ int decoding_matrix[k*k];
+ int dm_row[k];
+ int dm_column[k];
+ memset(decoding_matrix, 0, sizeof(decoding_matrix));
+ memset(dm_row, 0, sizeof(dm_row));
+ memset(dm_column, 0, sizeof(dm_column));
+ if (shec_make_decoding_matrix(true, want, avails, decoding_matrix, dm_row, dm_column, minimum) < 0) {
+ return -EIO;
+ }
+ }
+
+ for (int i = 0; i < k + m; i++) {
+ if (minimum[i] == 1) minimum_chunks->insert(shard_id_t(i));
+ }
+
+ return 0;
+}
+
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCodeShec::minimum_to_decode_with_cost(const set<int> &want_to_read,
const map<int, int> &available,
set<int> *minimum_chunks)
return _minimum_to_decode(want_to_read, available_chunks, minimum_chunks);
}
+END_IGNORE_DEPRECATED
+
+int ErasureCodeShec::minimum_to_decode_with_cost(const shard_id_set &want_to_read,
+ const shard_id_map<int> &available,
+ shard_id_set *minimum_chunks)
+{
+ shard_id_set available_chunks;
+
+ for (shard_id_map<int>::const_iterator i = available.begin();
+ i != available.end();
+ ++i)
+ available_chunks.insert(i->first);
+
+ return _minimum_to_decode(want_to_read, available_chunks, minimum_chunks);
+}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCodeShec::encode(const set<int> &want_to_encode,
const bufferlist &in,
map<int, bufferlist> *encoded)
return 0;
}
+[[deprecated]]
int ErasureCodeShec::encode_chunks(const set<int> &want_to_encode,
map<int, bufferlist> *encoded)
{
shec_encode(&chunks[0], &chunks[k], (*encoded)[0].length());
return 0;
}
+END_IGNORE_DEPRECATED
+
+int ErasureCodeShec::encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ char *chunks[k + m]; //TODO don't use variable length arrays
+ memset(chunks, 0, sizeof(char*) * (k + m));
+ uint64_t size = 0;
+
+ for (auto &&[shard, ptr] : in) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[static_cast<int>(shard)] = const_cast<char*>(ptr.c_str());
+ }
+
+ for (auto &&[shard, ptr] : out) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ chunks[static_cast<int>(shard)] = ptr.c_str();
+ }
+
+ char *zeros = nullptr;
+
+ for (shard_id_t i; i < k + m; ++i) {
+ if (in.contains(i) || out.contains(i)) continue;
+
+ if (zeros == nullptr) {
+ zeros = (char*)malloc(size);
+ memset(zeros, 0, size);
+ }
+
+ chunks[static_cast<int>(i)] = zeros;
+ }
+
+ shec_encode(&chunks[0], &chunks[k], size);
+
+ if (zeros != nullptr) free(zeros);
+
+ return 0;
+}
+IGNORE_DEPRECATED
+[[deprecated]]
int ErasureCodeShec::_decode(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded)
return decode_chunks(want_to_read, chunks, decoded);
}
+[[deprecated]]
int ErasureCodeShec::decode_chunks(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded)
return 0;
}
}
+END_IGNORE_DEPRECATED
+
+int ErasureCodeShec::decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out)
+{
+ unsigned int size = 0;
+ int erased[k + m];
+ int erased_count = 0;
+ int avails[k + m];
+ char *data[k];
+ char *coding[m];
+
+ for (auto &&[shard, ptr] : in) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ if (shard < k) {
+ data[static_cast<int>(shard)] = ptr.c_str();
+ }
+ else {
+ coding[static_cast<int>(shard) - k] = ptr.c_str();
+ }
+ avails[static_cast<int>(shard)] = 1;
+ erased[static_cast<int>(shard)] = 0;
+ }
+
+ for (auto &&[shard, ptr] : out) {
+ if (size == 0) size = ptr.length();
+ else ceph_assert(size == ptr.length());
+ if (shard < k) {
+ data[static_cast<int>(shard)] = ptr.c_str();
+ }
+ else {
+ coding[static_cast<int>(shard) - k] = ptr.c_str();
+ }
+ avails[static_cast<int>(shard)] = 0;
+ if (want_to_read.count(shard) > 0) {
+ erased[static_cast<int>(shard)] = 1;
+ erased_count++;
+ }
+ else {
+ erased[static_cast<int>(shard)] = 0;
+ }
+ }
+
+ if (erased_count > 0) {
+ return shec_decode(erased, avails, data, coding, size);
+ } else {
+ return 0;
+ }
+}
//
// ErasureCodeShecReedSolomonVandermonde
galois_w16_region_multiply(input_data, matrix[static_cast<int>(datashard) + (k * (static_cast<int>(codingshard) - k))], blocksize, output_data, 1);
break;
case 32:
- galois_w32_region_multiply(input_data, matrix[datashard + (k * (codingshard - k))], blocksize, output_data, 1);
+ galois_w32_region_multiply(input_data, matrix[static_cast<int>(datashard) + (k * (int(codingshard) - k))], blocksize, output_data, 1);
break;
}
}
w(0),
DEFAULT_W(8),
technique(_technique),
- matrix(0)
+ matrix(nullptr)
{}
~ErasureCodeShec() override {}
unsigned int get_chunk_size(unsigned int stripe_width) const override;
+ using ErasureCode::_minimum_to_decode;
+ [[deprecated]]
int _minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available_chunks,
- std::set<int> *minimum);
+ std::set<int> *minimum) override;
+ int _minimum_to_decode(const shard_id_set &want_to_read,
+ const shard_id_set &available_chunks,
+ shard_id_set *minimum) override;
+
+ [[deprecated]]
int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) override;
+ int minimum_to_decode_with_cost(const shard_id_set &want_to_read,
+ const shard_id_map<int> &available,
+ shard_id_set *minimum) override;
+
+ using ErasureCode::encode;
+ [[deprecated]]
int encode(const std::set<int> &want_to_encode,
const ceph::buffer::list &in,
std::map<int, ceph::buffer::list> *encoded) override;
+
+
+ [[deprecated]]
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, ceph::buffer::list> *encoded) override;
+ int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override;
+ using ErasureCode::_decode;
+ [[deprecated]]
int _decode(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
+ [[deprecated]]
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, ceph::buffer::list> &chunks,
std::map<int, ceph::buffer::list> *decoded) override;
+ int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override;
int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
virtual void shec_encode(char **data,
void encode_delta(const ceph::bufferptr &old_data,
const ceph::bufferptr &new_data,
- ceph::bufferptr *delta_maybe_in_place);
- void apply_delta(const shard_id_map<ceph::bufferptr> &in,
- shard_id_map<ceph::bufferptr> &out);
+ ceph::bufferptr *delta_maybe_in_place) override;
+ void apply_delta(const shard_id_map<ceph::bufferptr> &in,
+ shard_id_map<ceph::bufferptr> &out) override;
unsigned get_alignment() const override;
size_t get_minimum_granularity() override
private:
int parse(const ceph::ErasureCodeProfile &profile) override;
};
+static_assert(!std::is_abstract<ErasureCodeShecReedSolomonVandermonde>());
#endif
dout(20) << __func__ << ": Checking hash of " << i->first << dendl;
bufferhash h(-1);
h << bl;
- if (h.digest() != hinfo->get_chunk_hash(static_cast<int>(shard))) {
+ if (h.digest() != hinfo->get_chunk_hash(shard)) {
get_parent()->clog_error() << "Bad hash for " << i->first << " digest 0x"
- << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(static_cast<int>(shard)) << dec;
+ << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(shard) << dec;
dout(5) << __func__ << ": Bad hash for " << i->first << " digest 0x"
- << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(static_cast<int>(shard)) << dec << dendl;
+ << hex << h.digest() << " expected 0x" << hinfo->get_chunk_hash(shard) << dec << dendl;
r = -EIO;
goto error;
}
return 0;
}
- if (hinfo->get_chunk_hash(static_cast<int>(get_parent()->whoami_shard().shard)) !=
+ if (hinfo->get_chunk_hash(get_parent()->whoami_shard().shard) !=
pos.data_hash.digest()) {
dout(0) << "_scan_list " << poid << " got incorrect hash on read 0x"
<< std::hex << pos.data_hash.digest() << " != expected 0x"
- << hinfo->get_chunk_hash(static_cast<int>(get_parent()->whoami_shard().shard))
+ << hinfo->get_chunk_hash(get_parent()->whoami_shard().shard)
<< std::dec << dendl;
o.ec_hash_mismatch = true;
return 0;
* we match our chunk hash and our recollection of the hash for
* chunk 0 matches that of our peers, there is likely no corruption.
*/
- o.digest = hinfo->get_chunk_hash(0);
+ o.digest = hinfo->get_chunk_hash(shard_id_t(0));
o.digest_present = true;
} else {
/* Hack! We must be using partial overwrites, and partial overwrites
#include "ExtentCache.h"
#include "ECListener.h"
+/* This file is soon going to be replaced (before next release), so we are going
+ * to simply ignore all deprecated warnings.
+ * */
+IGNORE_DEPRECATED
+
//forward declaration
struct ECSubWrite;
struct ECSubWriteReply;
have.insert(static_cast<int>(i->shard));
}
std::map<int, std::vector<std::pair<int, int>>> min;
+
return ec_impl->minimum_to_decode(want, have, &min) == 0;
}
};
}
};
ostream &operator<<(ostream &lhs, const ECBackend::RMWPipeline::pipeline_state_t &rhs);
+
+END_IGNORE_DEPRECATED
have.insert(static_cast<int>(i->shard));
}
std::map<int, std::vector<std::pair<int, int>>> min;
+IGNORE_DEPRECATED
return ec_impl->minimum_to_decode(want, have, &min) == 0;
+END_IGNORE_DEPRECATED
}
};
std::unique_ptr<ECRecPred> get_is_recoverable_predicate() const {
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
+/* This file is soon going to be replaced (before next release), so we are going
+ * to simply ignore all deprecated warnings.
+ * */
+IGNORE_DEPRECATED
+
using std::dec;
using std::hex;
using std::less;
const auto distance =
std::min(right_chunk_index - left_chunk_index, (uint64_t)sinfo.get_k());
for(uint64_t i = 0; i < distance; i++) {
- auto raw_shard = (left_chunk_index + i) % sinfo.get_k();
- want_to_read->insert(sinfo.get_shard(raw_shard));
+ raw_shard_id_t raw_shard((left_chunk_index + i) % sinfo.get_k());
+ want_to_read->insert(static_cast<int>(sinfo.get_shard(raw_shard)));
}
}
void ECCommon::ReadPipeline::get_want_to_read_shards(
std::set<int> *want_to_read) const
{
- for (int i = 0; i < (int)sinfo.get_k(); ++i) {
- want_to_read->insert(sinfo.get_shard(i));
+ for (raw_shard_id_t i; i < (int)sinfo.get_k(); ++i) {
+ want_to_read->insert(static_cast<int>(sinfo.get_shard(i)));
}
}
uint64_t chunk_size = read_pipeline.sinfo.get_chunk_size();
uint64_t trim_offset = 0;
for (auto shard : wanted_to_read) {
- if (read_pipeline.sinfo.get_raw_shard(shard) * chunk_size <
- aligned_offset_in_stripe) {
+ int s = static_cast<int>(read_pipeline.sinfo.get_raw_shard(shard_id_t(shard)));
+ if ( s * chunk_size < aligned_offset_in_stripe) {
trim_offset += chunk_size;
} else {
break;
}
return ref;
}
+
+END_IGNORE_DEPRECATED
using ceph::ErasureCodeInterfaceRef;
using ceph::Formatter;
+IGNORE_DEPRECATED
+
namespace ECLegacy {
static ostream& _prefix(std::ostream *_dout, ECCommonL::RMWPipeline *rmw_pipeline) {
return rmw_pipeline->get_parent()->gen_dbg_prefix(*_dout) << "ECCommonL ";
return ref;
}
}
+
+END_IGNORE_DEPRECATED
\ No newline at end of file
#pragma once
+#include "include/types.h"
+#include "common/mini_flat_map.h"
+
struct ec_align_t {
uint64_t offset;
uint64_t size;
<< rhs.size << ","
<< rhs.flags;
}
+ ec_align_t(std::pair<uint64_t, uint64_t> p, uint32_t flags)
+ : offset(p.first), size(p.second), flags(flags) {}
+ ec_align_t(uint64_t offset, uint64_t size, uint32_t flags)
+ : offset(offset), size(size), flags(flags) {}
+ bool operator==(const ec_align_t &other) const;
+};
+
+struct raw_shard_id_t {
+ int8_t id;
+
+ raw_shard_id_t() : id(0) {}
+ explicit constexpr raw_shard_id_t(int8_t _id) : id(_id) {}
+
+ explicit constexpr operator int8_t() const { return id; }
+ // For convenient use in comparisons
+ explicit constexpr operator int() const { return id; }
+ explicit constexpr operator uint64_t() const { return id; }
+
+ const static raw_shard_id_t NO_SHARD;
+
+ void encode(ceph::buffer::list &bl) const {
+ using ceph::encode;
+ encode(id, bl);
+ }
+ void decode(ceph::buffer::list::const_iterator &bl) {
+ using ceph::decode;
+ decode(id, bl);
+ }
+ void dump(ceph::Formatter *f) const {
+ f->dump_int("id", id);
+ }
+ static void generate_test_instances(std::list<raw_shard_id_t*>& ls) {
+ ls.push_back(new raw_shard_id_t(1));
+ ls.push_back(new raw_shard_id_t(2));
+ }
+ raw_shard_id_t& operator++() { ++id; return *this; }
+ friend constexpr std::strong_ordering operator<=>(const raw_shard_id_t &lhs, const raw_shard_id_t &rhs) { return lhs.id <=> rhs.id; }
+ friend constexpr std::strong_ordering operator<=>(int lhs, const raw_shard_id_t &rhs) { return lhs <=> rhs.id; }
+ friend constexpr std::strong_ordering operator<=>(const raw_shard_id_t &lhs, int rhs) { return lhs.id <=> rhs; }
+
+ raw_shard_id_t& operator=(int other) { id = other; return *this; }
+ bool operator==(const raw_shard_id_t &other) const { return id == other.id; }
};
+template <typename T>
+using shard_id_map = mini_flat_map<shard_id_t, T>;
#include "include/encoding.h"
#include "ECUtil.h"
+/* This file is soon going to be replaced (before next release), so we are going
+ * to simply ignore all deprecated warnings.
+ * */
+IGNORE_DEPRECATED
+
using namespace std;
using ceph::bufferlist;
using ceph::ErasureCodeInterfaceRef;
{
return HINFO_KEY;
}
+
+END_IGNORE_DEPRECATED
const uint64_t chunk_size;
const unsigned int k; // Can be calculated with a division from above. Better to cache.
const unsigned int m;
- const std::vector<int> chunk_mapping;
- const std::vector<unsigned int> chunk_mapping_reverse;
+ const std::vector<shard_id_t> chunk_mapping;
+ const std::vector<raw_shard_id_t> chunk_mapping_reverse;
private:
- static std::vector<int> complete_chunk_mapping(
- std::vector<int> _chunk_mapping, unsigned int n)
+ static std::vector<shard_id_t> complete_chunk_mapping(
+ std::vector<shard_id_t> _chunk_mapping, unsigned int n)
{
unsigned int size = _chunk_mapping.size();
- std::vector<int> chunk_mapping(n);
- for (unsigned int i = 0; i < n; i++) {
+ std::vector<shard_id_t> chunk_mapping(n);
+ for (shard_id_t i; i < n; ++i) {
if (size > i) {
- chunk_mapping.at(i) = _chunk_mapping.at(i);
+ chunk_mapping.at(static_cast<int>(i)) = _chunk_mapping.at(static_cast<int>(i));
} else {
- chunk_mapping.at(i) = static_cast<int>(i);
+ chunk_mapping.at(static_cast<int>(i)) = i;
}
}
return chunk_mapping;
}
- static std::vector<unsigned int> reverse_chunk_mapping(
- std::vector<int> chunk_mapping)
+ static std::vector<raw_shard_id_t> reverse_chunk_mapping(
+ std::vector<shard_id_t> chunk_mapping)
{
unsigned int size = chunk_mapping.size();
- std::vector<unsigned int> reverse(size);
- std::vector<bool> used(size,false);
- for (unsigned int i = 0; i < size; i++) {
- int index = chunk_mapping.at(i);
+ std::vector<raw_shard_id_t> reverse(size);
+ shard_id_set used;
+ for (raw_shard_id_t i; i < size; ++i) {
+ shard_id_t index = chunk_mapping.at(static_cast<int>(i));
// Mapping must be a bijection and a permutation
- ceph_assert(!used.at(index));
- used.at(index) = true;
- reverse.at(index) = i;
+ ceph_assert(!used.contains(index));
+ used.insert(index);
+ reverse.at(static_cast<int>(index)) = i;
}
return reverse;
}
chunk_size(stripe_width / k),
k(k),
m(m),
- chunk_mapping(complete_chunk_mapping(std::vector<int>(), k + m)),
+ chunk_mapping(complete_chunk_mapping(std::vector<shard_id_t>(), k + m)),
chunk_mapping_reverse(reverse_chunk_mapping(chunk_mapping)) {
ceph_assert(stripe_width % k == 0);
}
stripe_info_t(unsigned int k, unsigned int m, uint64_t stripe_width,
- std::vector<int> _chunk_mapping)
+ std::vector<shard_id_t> _chunk_mapping)
: stripe_width(stripe_width),
chunk_size(stripe_width / k),
k(k),
unsigned int get_k_plus_m() const {
return k + m;
}
- int get_shard(unsigned int raw_shard) const {
- return chunk_mapping[raw_shard];
+ shard_id_t get_shard(raw_shard_id_t raw_shard) const {
+ return chunk_mapping[static_cast<int>(raw_shard)];
}
- unsigned int get_raw_shard(int shard) const {
- return chunk_mapping_reverse[shard];
+ raw_shard_id_t get_raw_shard(shard_id_t shard) const {
+ return chunk_mapping_reverse[static_cast<int>(shard)];
}
uint64_t logical_to_prev_chunk_offset(uint64_t offset) const {
return (offset / stripe_width) * chunk_size;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<HashInfo*>& o);
- uint32_t get_chunk_hash(int shard) const {
- ceph_assert((unsigned)shard < cumulative_shard_hashes.size());
- return cumulative_shard_hashes[shard];
+ uint32_t get_chunk_hash(shard_id_t shard) const {
+ ceph_assert(shard < cumulative_shard_hashes.size());
+ return cumulative_shard_hashes[static_cast<int>(shard)];
}
uint64_t get_total_chunk_size() const {
return total_chunk_size;
chunks[j->first].substr_of(j->second, i, sinfo.get_chunk_size());
}
bufferlist bl;
+IGNORE_DEPRECATED
int r = ec_impl->decode_concat(want_to_read, chunks, &bl);
+END_IGNORE_DEPRECATED
ceph_assert(r == 0);
ceph_assert(bl.length() % sinfo.get_chunk_size() == 0);
out->claim_append(bl);
}
map<int, vector<pair<int, int>>> min;
+IGNORE_DEPRECATED
int r = ec_impl->minimum_to_decode(need, avail, &min);
+END_IGNORE_DEPRECATED
ceph_assert(r == 0);
int chunks_count = 0;
repair_data_per_chunk);
}
map<int, bufferlist> out_bls;
+ IGNORE_DEPRECATED
r = ec_impl->decode(need, chunks, &out_bls, sinfo.get_chunk_size());
+ END_IGNORE_DEPRECATED
ceph_assert(r == 0);
for (auto j = out.begin(); j != out.end(); ++j) {
ceph_assert(out_bls.count(j->first));
map<int, bufferlist> encoded;
bufferlist buf;
buf.substr_of(in, i, sinfo.get_stripe_width());
+IGNORE_DEPRECATED
int r = ec_impl->encode(want, buf, &encoded);
+END_IGNORE_DEPRECATED
ceph_assert(r == 0);
for (map<int, bufferlist>::iterator i = encoded.begin();
i != encoded.end();
const std::vector<unsigned int> chunk_mapping_reverse;
private:
static std::vector<int> complete_chunk_mapping(
- std::vector<int> _chunk_mapping, unsigned int n)
+ std::vector<shard_id_t> _chunk_mapping, unsigned int n)
{
unsigned int size = _chunk_mapping.size();
std::vector<int> chunk_mapping(n);
for (unsigned int i = 0; i < n; i++) {
if (size > i) {
- chunk_mapping.at(i) = _chunk_mapping.at(i);
+ chunk_mapping.at(i) = int(_chunk_mapping.at(i));
} else {
chunk_mapping.at(i) = static_cast<int>(i);
}
chunk_size(stripe_width / k),
k(k),
m(m),
- chunk_mapping(complete_chunk_mapping(std::vector<int>(), k + m)),
+ chunk_mapping(complete_chunk_mapping(std::vector<shard_id_t>(), k + m)),
chunk_mapping_reverse(reverse_chunk_mapping(chunk_mapping)) {
ceph_assert(stripe_width % k == 0);
}
stripe_info_t(unsigned int k, unsigned int m, uint64_t stripe_width,
- std::vector<int> _chunk_mapping)
+ std::vector<shard_id_t> _chunk_mapping)
: stripe_width(stripe_width),
chunk_size(stripe_width / k),
k(k),
snapid_t,
mempool::osdmap::flat_map> snap_interval_set_t;
+using shard_id_set = bitset_set<128, shard_id_t>;
+WRITE_CLASS_DENC(shard_id_set)
/**
* osd request identifier
#include "osd/osd_types.h"
#include "erasure-code/ErasureCode.h"
+// Chunk version is deprecated.
#define FIRST_DATA_CHUNK 0
#define SECOND_DATA_CHUNK 1
+#define FIRST_DATA_SHARD shard_id_t(0)
+#define SECOND_DATA_SHARD shard_id_t(1)
#define DATA_CHUNKS 2u
+#define DATA_SHARDS 2u
#define CODING_CHUNK 2
+#define CODING_SHARD shard_id_t(2)
+
#define CODING_CHUNKS 1u
+#define CODING_SHARDS 1u
#define MINIMUM_TO_RECOVER 2u
"indep", pg_pool_t::TYPE_ERASURE, ss);
}
+ IGNORE_DEPRECATED
+ [[deprecated]]
int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) override {
available_chunks.insert(i->first);
return _minimum_to_decode(want_to_read, available_chunks, minimum);
}
+ END_IGNORE_DEPRECATED
+
+ int minimum_to_decode_with_cost(const shard_id_set &want_to_read,
+ const shard_id_map<int> &available,
+ shard_id_set *minimum) override {
+ //
+ // If one chunk is more expensive to fetch than the others,
+ // recover it instead. For instance, if the cost reflects the
+ // time it takes for a chunk to be retrieved from a remote
+ // OSD and if CPU is cheap, it could make sense to recover
+ // instead of fetching the chunk.
+ //
+ shard_id_map<int> c2c(available);
+ if (c2c.size() > DATA_SHARDS) {
+ if (c2c[FIRST_DATA_SHARD] > c2c[SECOND_DATA_SHARD] &&
+ c2c[FIRST_DATA_SHARD] > c2c[CODING_SHARD])
+ c2c.erase(FIRST_DATA_SHARD);
+ else if(c2c[SECOND_DATA_SHARD] > c2c[FIRST_DATA_SHARD] &&
+ c2c[SECOND_DATA_SHARD] > c2c[CODING_SHARD])
+ c2c.erase(SECOND_DATA_SHARD);
+ else if(c2c[CODING_SHARD] > c2c[FIRST_DATA_SHARD] &&
+ c2c[CODING_SHARD] > c2c[SECOND_DATA_SHARD])
+ c2c.erase(CODING_SHARD);
+ }
+ shard_id_set available_chunks;
+ for (shard_id_map<int>::const_iterator i = c2c.cbegin();
+ i != c2c.cend();
+ ++i)
+ available_chunks.insert(i->first);
+ return _minimum_to_decode(want_to_read, available_chunks, minimum);
+ }
uint64_t get_supported_optimizations() const override {
return FLAG_EC_PLUGIN_PARTIAL_READ_OPTIMIZATION;
return 1;
}
+ [[deprecated]]
int encode(const std::set<int> &want_to_encode,
const bufferlist &in,
std::map<int, bufferlist> *encoded) override {
return 0;
}
+ int encode(const shard_id_set &want_to_encode,
+ const bufferlist &in,
+ shard_id_map<bufferlist> *encoded) override {
+ //
+ // make sure all data chunks have the same length, allocating
+ // padding if necessary.
+ //
+ unsigned int chunk_length = get_chunk_size(in.length());
+ bufferlist out(in);
+ unsigned int width = get_chunk_count() * get_chunk_size(in.length());
+ bufferptr pad(width - in.length());
+ pad.zero(0, get_data_chunk_count());
+ out.push_back(pad);
+ //
+ // compute the coding chunk with first chunk ^ second chunk
+ //
+ char *p = out.c_str();
+ for (unsigned i = 0; i < chunk_length; i++)
+ p[i + int(CODING_SHARD) * chunk_length] =
+ p[i + int(FIRST_DATA_SHARD) * chunk_length] ^
+ p[i + int(SECOND_DATA_SHARD) * chunk_length];
+ //
+ // populate the bufferlist with bufferptr pointing
+ // to chunk boundaries
+ //
+ const bufferptr &ptr = out.front();
+ for (auto j = want_to_encode.begin();
+ j != want_to_encode.end();
+ ++j) {
+ bufferlist tmp;
+ bufferptr chunk(ptr, int(*j) * chunk_length, chunk_length);
+ tmp.push_back(chunk);
+ tmp.claim_append((*encoded)[*j]);
+ (*encoded)[*j].swap(tmp);
+ }
+ return 0;
+ }
+
+ [[deprecated]]
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, bufferlist> *encoded) override {
ceph_abort();
return 0;
}
+ int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override {
+ ceph_abort();
+ return 0;
+ }
+
+ void encode_delta(const bufferptr &old_data,
+ const bufferptr &new_data,
+ bufferptr *delta_maybe_in_place) {
+ ceph_abort();
+ }
+
+ void apply_delta(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override {
+ ceph_abort();
+ }
+
+ [[deprecated]]
int _decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded) override {
return 0;
}
+
+ int _decode(const shard_id_set &want_to_read,
+ const shard_id_map<bufferlist> &chunks,
+ shard_id_map<bufferlist> *decoded) override {
+ //
+ // All chunks have the same size
+ //
+ unsigned chunk_length = (*chunks.begin()).second.length();
+ for (shard_id_set::const_iterator i = want_to_read.begin();
+ i != want_to_read.end();
+ ++i) {
+ if (chunks.find(*i) != chunks.end()) {
+ //
+ // If the chunk is available, just copy the bufferptr pointer
+ // to the decoded argument.
+ //
+ (*decoded)[*i] = chunks.find(*i)->second;
+ } else if(chunks.size() != 2) {
+ //
+ // If a chunk is missing and there are not enough chunks
+ // to recover, abort.
+ //
+ return -ERANGE;
+ } else {
+ //
+ // No matter what the missing chunk is, XOR of the other
+ // two recovers it.
+ //
+ shard_id_map<bufferlist>::const_iterator k = chunks.begin();
+ const char *a = k->second.front().c_str();
+ ++k;
+ const char *b = k->second.front().c_str();
+ bufferptr chunk(chunk_length);
+ char *c = chunk.c_str();
+ for (unsigned j = 0; j < chunk_length; j++) {
+ c[j] = a[j] ^ b[j];
+ }
+
+ bufferlist tmp;
+ tmp.append(chunk);
+ tmp.claim_append((*decoded)[*i]);
+ (*decoded)[*i].swap(tmp);
+ }
+ }
+ return 0;
+ }
+
+ [[deprecated]]
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded) override {
return 0;
}
- const std::vector<int> &get_chunk_mapping() const override {
- static std::vector<int> mapping;
+ int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override {
+ ceph_abort();
+ return 0;
+ }
+
+ const std::vector<shard_id_t> &get_chunk_mapping() const override {
+ static std::vector<shard_id_t> mapping;
return mapping;
}
class ErasureCodeTest : public ErasureCode {
public:
- map<int, bufferlist> encode_chunks_encoded;
+ shard_id_map<bufferlist> encode_chunks_encoded;
unsigned int k;
unsigned int m;
unsigned int chunk_size;
ErasureCodeTest(unsigned int _k, unsigned int _m, unsigned int _chunk_size) :
- k(_k), m(_m), chunk_size(_chunk_size) {}
+ encode_chunks_encoded(_k + _m), k(_k), m(_m), chunk_size(_chunk_size) {}
~ErasureCodeTest() override {}
int init(ErasureCodeProfile &profile, ostream *ss) override {
return chunk_size;
}
size_t get_minimum_granularity() override { return 1; }
+ [[deprecated]]
int encode_chunks(const set<int> &want_to_encode,
map<int, bufferlist> *encoded) override {
- encode_chunks_encoded = *encoded;
+ ceph_abort_msg("Only new API is tested");
+ //encode_chunks_encoded = *encoded;
return 0;
}
+ int encode_chunks(const shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override {
+ return 0;
+ }
+ [[deprecated]]
int decode_chunks(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded) override {
ceph_abort_msg("ErasureCode::decode_chunks not implemented");
}
+ int decode_chunks(const shard_id_set &want_to_read,
+ shard_id_map<bufferptr> &in,
+ shard_id_map<bufferptr> &out) override {
+ ceph_abort_msg("ErasureCode::decode_chunks not implemented");
+ }
int create_rule(const string &name,
CrushWrapper &crush,
ostream *ss) const override { return 0; }
};
+static_assert(!std::is_abstract<ErasureCodeTest>());
/*
* If we have a buffer of 5 bytes (X below) and a chunk size of 3
unsigned chunk_size = ErasureCode::SIMD_ALIGN * 7;
ErasureCodeTest erasure_code(k, m, chunk_size);
- set<int> want_to_encode;
- for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++)
- want_to_encode.insert(i);
+ shard_id_set want_to_encode;
+ want_to_encode.insert_range(shard_id_t(0), erasure_code.get_chunk_count());
string data(chunk_size + chunk_size / 2, 'X'); // uses 1.5 chunks out of 3
// make sure nothing is memory aligned
bufferptr ptr(buffer::create_aligned(data.length() + 1, ErasureCode::SIMD_ALIGN));
ptr.set_length(data.length());
bufferlist in;
in.append(ptr);
- map<int, bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(k+m);
ASSERT_FALSE(in.is_aligned(ErasureCode::SIMD_ALIGN));
ASSERT_EQ(0, erasure_code.encode(want_to_encode, in, &encoded));
- for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++)
+ for (shard_id_t i; i < erasure_code.get_chunk_count(); ++i)
ASSERT_TRUE(encoded[i].is_aligned(ErasureCode::SIMD_ALIGN));
for (unsigned i = 0; i < chunk_size / 2; i++)
- ASSERT_EQ(encoded[1][i], 'X');
- ASSERT_NE(encoded[1][chunk_size / 2], 'X');
+ ASSERT_EQ(encoded[shard_id_t(1)][i], 'X');
+ ASSERT_NE(encoded[shard_id_t(1)][chunk_size / 2], 'X');
}
TEST(ErasureCodeTest, encode_misaligned_non_contiguous)
unsigned chunk_size = ErasureCode::SIMD_ALIGN * 7;
ErasureCodeTest erasure_code(k, m, chunk_size);
- set<int> want_to_encode;
- for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++)
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < erasure_code.get_chunk_count(); ++i)
want_to_encode.insert(i);
string data(chunk_size, 'X');
// create a non contiguous bufferlist where the frist and the second
bufferptr ptr(buffer::create_aligned(data.length() + 1, ErasureCode::SIMD_ALIGN));
in.append(ptr);
}
- map<int, bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(k + m);
ASSERT_FALSE(in.is_contiguous());
ASSERT_TRUE(in.front().is_aligned(ErasureCode::SIMD_ALIGN));
ASSERT_TRUE(in.back().is_aligned(ErasureCode::SIMD_ALIGN));
ASSERT_FALSE(in.back().is_n_align_sized(chunk_size));
ASSERT_EQ(0, erasure_code.encode(want_to_encode, in, &encoded));
- for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++) {
+ for (shard_id_t i; i < erasure_code.get_chunk_count(); ++i) {
ASSERT_TRUE(encoded[i].is_aligned(ErasureCode::SIMD_ALIGN));
ASSERT_TRUE(encoded[i].is_n_align_sized(chunk_size));
}
#include "common/config_proxy.h"
#include "gtest/gtest.h"
+// FIXME: Clay is not yet supported in new EC.
+IGNORE_DEPRECATED
+
using namespace std;
TEST(ErasureCodeClay, sanity_check_k)
}
}
+END_IGNORE_DEPRECATED
+
/*
* Local Variables:
* compile-command: "cd ../.. ;
#include "global/global_context.h"
#include "gtest/gtest.h"
+IGNORE_DEPRECATED
+
using namespace std;
TEST(ErasureCodeExample, chunk_size)
TEST(ErasureCodeExample, minimum_to_decode)
{
ErasureCodeExample example;
- set<int> available_chunks;
- set<int> want_to_read;
- want_to_read.insert(1);
+ shard_id_set available_chunks;
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(1));
{
- set<int> minimum;
+ shard_id_set minimum;
EXPECT_EQ(-EIO, example._minimum_to_decode(want_to_read,
available_chunks,
&minimum));
}
- available_chunks.insert(0);
- available_chunks.insert(2);
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(2));
{
- set<int> minimum;
+ shard_id_set minimum;
EXPECT_EQ(0, example._minimum_to_decode(want_to_read,
available_chunks,
&minimum));
EXPECT_EQ(available_chunks, minimum);
EXPECT_EQ(2u, minimum.size());
- EXPECT_EQ(1u, minimum.count(0));
- EXPECT_EQ(1u, minimum.count(2));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(0)));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(2)));
}
{
- set<int> minimum;
- available_chunks.insert(1);
+ shard_id_set minimum;
+ available_chunks.insert(shard_id_t(1));
EXPECT_EQ(0, example._minimum_to_decode(want_to_read,
available_chunks,
&minimum));
EXPECT_EQ(1u, minimum.size());
- EXPECT_EQ(1u, minimum.count(1));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(1)));
}
}
TEST(ErasureCodeExample, minimum_to_decode_with_cost)
{
ErasureCodeExample example;
- map<int,int> available;
- set<int> want_to_read;
- want_to_read.insert(1);
+ shard_id_map<int> available(example.get_chunk_count());
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(1));
{
- set<int> minimum;
+ shard_id_set minimum;
EXPECT_EQ(-EIO, example.minimum_to_decode_with_cost(want_to_read,
available,
&minimum));
}
- available[0] = 1;
- available[2] = 1;
+ available[shard_id_t(0)] = 1;
+ available[shard_id_t(2)] = 1;
{
- set<int> minimum;
+ shard_id_set minimum;
EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read,
available,
&minimum));
EXPECT_EQ(2u, minimum.size());
- EXPECT_EQ(1u, minimum.count(0));
- EXPECT_EQ(1u, minimum.count(2));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(0)));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(2)));
}
{
- set<int> minimum;
- available[1] = 1;
+ shard_id_set minimum;
+ available[shard_id_t(1)] = 1;
EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read,
available,
&minimum));
EXPECT_EQ(1u, minimum.size());
- EXPECT_EQ(1u, minimum.count(1));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(1)));
}
{
- set<int> minimum;
- available[1] = 2;
+ shard_id_set minimum;
+ available[shard_id_t(1)] = 2;
EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read,
available,
&minimum));
EXPECT_EQ(2u, minimum.size());
- EXPECT_EQ(1u, minimum.count(0));
- EXPECT_EQ(1u, minimum.count(2));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(0)));
+ EXPECT_EQ(1u, minimum.count(shard_id_t(2)));
}
}
bufferlist in;
in.append("ABCDE");
- set<int> want_to_encode;
+ shard_id_set want_to_encode;
for(unsigned int i = 0; i < example.get_chunk_count(); i++)
- want_to_encode.insert(i);
- map<int, bufferlist> encoded;
+ want_to_encode.insert(shard_id_t(i));
+ shard_id_map<bufferlist> encoded(example.get_chunk_count());
EXPECT_EQ(0, example.encode(want_to_encode, in, &encoded));
EXPECT_EQ(example.get_chunk_count(), encoded.size());
- EXPECT_EQ(example.get_chunk_size(in.length()), encoded[0].length());
- EXPECT_EQ('A', encoded[0][0]);
- EXPECT_EQ('B', encoded[0][1]);
- EXPECT_EQ('C', encoded[0][2]);
- EXPECT_EQ('D', encoded[1][0]);
- EXPECT_EQ('E', encoded[1][1]);
- EXPECT_EQ('A'^'D', encoded[2][0]);
- EXPECT_EQ('B'^'E', encoded[2][1]);
- EXPECT_EQ('C'^0, encoded[2][2]);
+ EXPECT_EQ(example.get_chunk_size(in.length()), encoded[shard_id_t(0)].length());
+ EXPECT_EQ('A', encoded[shard_id_t(0)][0]);
+ EXPECT_EQ('B', encoded[shard_id_t(0)][1]);
+ EXPECT_EQ('C', encoded[shard_id_t(0)][2]);
+ EXPECT_EQ('D', encoded[shard_id_t(1)][0]);
+ EXPECT_EQ('E', encoded[shard_id_t(1)][1]);
+ EXPECT_EQ('A'^'D', encoded[shard_id_t(2)][0]);
+ EXPECT_EQ('B'^'E', encoded[shard_id_t(2)][1]);
+ EXPECT_EQ('C'^0, encoded[shard_id_t(2)][2]);
// all chunks are available
{
int want_to_decode[] = { 0, 1 };
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, example._decode(set<int>(want_to_decode, want_to_decode+2),
+ shard_id_map<bufferlist> decoded(example.get_chunk_count());
+ EXPECT_EQ(0, example._decode(shard_id_set (want_to_decode, want_to_decode+2),
encoded,
&decoded));
EXPECT_EQ(2u, decoded.size());
- EXPECT_EQ(3u, decoded[0].length());
- EXPECT_EQ('A', decoded[0][0]);
- EXPECT_EQ('B', decoded[0][1]);
- EXPECT_EQ('C', decoded[0][2]);
- EXPECT_EQ('D', decoded[1][0]);
- EXPECT_EQ('E', decoded[1][1]);
+ EXPECT_EQ(3u, decoded[shard_id_t(0)].length());
+ EXPECT_EQ('A', decoded[shard_id_t(0)][0]);
+ EXPECT_EQ('B', decoded[shard_id_t(0)][1]);
+ EXPECT_EQ('C', decoded[shard_id_t(0)][2]);
+ EXPECT_EQ('D', decoded[shard_id_t(1)][0]);
+ EXPECT_EQ('E', decoded[shard_id_t(1)][1]);
}
- // one chunk is missing
+ // one chunk is missing
{
- map<int, bufferlist> degraded = encoded;
- degraded.erase(0);
+ shard_id_map<bufferlist> degraded = encoded;
+ degraded.erase(shard_id_t(0));
EXPECT_EQ(2u, degraded.size());
int want_to_decode[] = { 0, 1 };
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, example._decode(set<int>(want_to_decode, want_to_decode+2),
+ shard_id_map<bufferlist> decoded(example.get_chunk_count());
+ EXPECT_EQ(0, example._decode(shard_id_set (want_to_decode, want_to_decode+2),
degraded,
&decoded));
EXPECT_EQ(2u, decoded.size());
- EXPECT_EQ(3u, decoded[0].length());
- EXPECT_EQ('A', decoded[0][0]);
- EXPECT_EQ('B', decoded[0][1]);
- EXPECT_EQ('C', decoded[0][2]);
- EXPECT_EQ('D', decoded[1][0]);
- EXPECT_EQ('E', decoded[1][1]);
+ EXPECT_EQ(3u, decoded[shard_id_t(0)].length());
+ EXPECT_EQ('A', decoded[shard_id_t(0)][0]);
+ EXPECT_EQ('B', decoded[shard_id_t(0)][1]);
+ EXPECT_EQ('C', decoded[shard_id_t(0)][2]);
+ EXPECT_EQ('D', decoded[shard_id_t(1)][0]);
+ EXPECT_EQ('E', decoded[shard_id_t(1)][1]);
}
}
-TEST(ErasureCodeExample, decode)
+IGNORE_DEPRECATED
+TEST(ErasureCodeExample, decode_legacy)
{
ErasureCodeExample example;
EXPECT_EQ(out.length(), encoded[0].length());
// cannot recover
- map<int, bufferlist> degraded;
+ map<int, bufferlist> degraded;
degraded[2] = encoded[2];
EXPECT_EQ(-ERANGE, example.decode_concat(degraded, &out));
}
+END_IGNORE_DEPRECATED
+
+TEST(ErasureCodeExample, decode)
+{
+ ErasureCodeExample example;
+
+#define LARGE_ENOUGH 2048
+ bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
+ in_ptr.zero();
+ in_ptr.set_length(0);
+ const char *payload =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+ in_ptr.append(payload, strlen(payload));
+ bufferlist in;
+ in.push_back(in_ptr);
+ int want_to_encode[] = { 0, 1, 2 };
+ shard_id_map<bufferlist> encoded(example.get_chunk_count());
+ EXPECT_EQ(0, example.encode(shard_id_set(want_to_encode, want_to_encode+3),
+ in,
+ &encoded));
+ EXPECT_EQ(3u, encoded.size());
+
+ // successful decode
+ bufferlist out;
+ shard_id_t shard0(0);
+ shard_id_t shard1(1);
+ encoded.erase(shard0);
+ shard_id_map<bufferlist> decoded(example.get_chunk_count());
+ EXPECT_EQ(0, example.decode(shard_id_set{shard0},
+ encoded, &decoded, 0));
+ bufferlist usable;
+ EXPECT_EQ(decoded[shard0].length(), encoded[shard1].length());
+}
TEST(ErasureCodeExample, create_rule)
{
* End:
*/
+END_IGNORE_DEPRECATED
class IsaErasureCodeTest : public ::testing::Test {
public:
- void compare_chunks(bufferlist &in, map<int, bufferlist> &encoded);
+ void compare_chunks(bufferlist &in, shard_id_map<bufferlist> &encoded);
void encode_decode(unsigned object_size);
};
-void IsaErasureCodeTest::compare_chunks(bufferlist &in, map<int, bufferlist> &encoded)
+void IsaErasureCodeTest::compare_chunks(bufferlist &in, shard_id_map<bufferlist> &encoded)
{
unsigned object_size = in.length();
- unsigned chunk_size = encoded[0].length();
+ unsigned chunk_size = encoded[shard_id_t(0)].length();
for (unsigned i = 0; i < encoded.size(); i++) {
if (i * chunk_size >= object_size)
break;
int chunk_length = object_size > (i + 1) * chunk_size ? chunk_size : object_size - i * chunk_size;
- EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + i * chunk_size, chunk_length));
+ EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + i * chunk_size, chunk_length));
}
}
// may be multiple bufferptr if object_size is larger than CEPH_PAGE_SIZE
in.append(payload.c_str(), payload.length());
int want_to_encode[] = {0, 1, 2, 3};
- map<int, bufferlist> encoded;
- EXPECT_EQ(0, Isa.encode(set<int>(want_to_encode, want_to_encode + 4),
+ shard_id_map<bufferlist> encoded(Isa.get_chunk_count());
+ EXPECT_EQ(0, Isa.encode(shard_id_set(want_to_encode, want_to_encode + 4),
in,
&encoded));
EXPECT_EQ(4u, encoded.size());
- unsigned chunk_size = encoded[0].length();
+ unsigned chunk_size = encoded[shard_id_t(0)].length();
EXPECT_EQ(chunk_size, Isa.get_chunk_size(object_size));
compare_chunks(in, encoded);
// all chunks are available
{
int want_to_decode[] = {0, 1};
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 2),
+ shard_id_map<bufferlist> decoded(Isa.get_chunk_count());
+ EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 2),
encoded,
&decoded));
EXPECT_EQ(2u, decoded.size());
- EXPECT_EQ(chunk_size, decoded[0].length());
+ EXPECT_EQ(chunk_size, decoded[shard_id_t(0)].length());
compare_chunks(in, decoded);
}
// one data chunk is missing
{
- map<int, bufferlist> degraded = encoded;
+ shard_id_map<bufferlist> degraded = encoded;
- string enc1(encoded[1].c_str(), chunk_size);
+ string enc1(encoded[shard_id_t(1)].c_str(), chunk_size);
- degraded.erase(1);
+ degraded.erase(shard_id_t(1));
EXPECT_EQ(3u, degraded.size());
int want_to_decode[] = {1};
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 1),
+ shard_id_map<bufferlist> decoded(Isa.get_chunk_count());
+ EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 1),
degraded,
&decoded));
// always decode all, regardless of want_to_decode
EXPECT_EQ(4u, decoded.size());
- EXPECT_EQ(chunk_size, decoded[1].length());
- EXPECT_EQ(0, memcmp(decoded[1].c_str(), enc1.c_str(), chunk_size));
+ EXPECT_EQ(chunk_size, decoded[shard_id_t(1)].length());
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(1)].c_str(), enc1.c_str(), chunk_size));
}
// non-xor coding chunk is missing
{
- map<int, bufferlist> degraded = encoded;
+ shard_id_map<bufferlist> degraded = encoded;
- string enc3(encoded[3].c_str(), chunk_size);
+ string enc3(encoded[shard_id_t(3)].c_str(), chunk_size);
- degraded.erase(3);
+ degraded.erase(shard_id_t(3));
EXPECT_EQ(3u, degraded.size());
int want_to_decode[] = {3};
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 1),
+ shard_id_map<bufferlist> decoded(Isa.get_chunk_count());
+ EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 1),
degraded,
&decoded));
// always decode all, regardless of want_to_decode
EXPECT_EQ(4u, decoded.size());
- EXPECT_EQ(chunk_size, decoded[3].length());
- EXPECT_EQ(0, memcmp(decoded[3].c_str(), enc3.c_str(), chunk_size));
+ EXPECT_EQ(chunk_size, decoded[shard_id_t(3)].length());
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(3)].c_str(), enc3.c_str(), chunk_size));
}
// xor coding chunk is missing
{
- map<int, bufferlist> degraded = encoded;
+ shard_id_map<bufferlist> degraded = encoded;
- string enc2(encoded[2].c_str(), chunk_size);
+ string enc2(encoded[shard_id_t(2)].c_str(), chunk_size);
- degraded.erase(2);
+ degraded.erase(shard_id_t(2));
EXPECT_EQ(3u, degraded.size());
int want_to_decode[] = {2};
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 1),
+ shard_id_map<bufferlist> decoded(Isa.get_chunk_count());
+ EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 1),
degraded,
&decoded));
// always decode all, regardless of want_to_decode
EXPECT_EQ(4u, decoded.size());
- EXPECT_EQ(chunk_size, decoded[2].length());
- EXPECT_EQ(0, memcmp(decoded[2].c_str(), enc2.c_str(), chunk_size));
+ EXPECT_EQ(chunk_size, decoded[shard_id_t(2)].length());
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(2)].c_str(), enc2.c_str(), chunk_size));
}
// one data and one coding chunk is missing
{
- map<int, bufferlist> degraded = encoded;
+ shard_id_map<bufferlist> degraded = encoded;
- string enc3(encoded[3].c_str(), chunk_size);
+ string enc3(encoded[shard_id_t(3)].c_str(), chunk_size);
- degraded.erase(1);
- degraded.erase(3);
+ degraded.erase(shard_id_t(1));
+ degraded.erase(shard_id_t(3));
EXPECT_EQ(2u, degraded.size());
int want_to_decode[] = {1, 3};
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 2),
+ shard_id_map<bufferlist> decoded(Isa.get_chunk_count());
+ EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 2),
degraded,
&decoded));
// always decode all, regardless of want_to_decode
EXPECT_EQ(4u, decoded.size());
- EXPECT_EQ(chunk_size, decoded[1].length());
- EXPECT_EQ(0, memcmp(decoded[3].c_str(), enc3.c_str(), chunk_size));
+ EXPECT_EQ(chunk_size, decoded[shard_id_t(1)].length());
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(3)].c_str(), enc3.c_str(), chunk_size));
}
// two data chunks are missing
{
- map<int, bufferlist> degraded = encoded;
- degraded.erase(0);
- degraded.erase(1);
+ shard_id_map<bufferlist> degraded = encoded;
+ degraded.erase(shard_id_t(0));
+ degraded.erase(shard_id_t(1));
EXPECT_EQ(2u, degraded.size());
int want_to_decode[] = {0, 1};
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 2),
+ shard_id_map<bufferlist> decoded(Isa.get_chunk_count());
+ EXPECT_EQ(0, Isa._decode(shard_id_set(want_to_decode, want_to_decode + 2),
degraded,
&decoded));
// always decode all, regardless of want_to_decode
EXPECT_EQ(4u, decoded.size());
- EXPECT_EQ(chunk_size, decoded[0].length());
+ EXPECT_EQ(chunk_size, decoded[shard_id_t(0)].length());
compare_chunks(in, decoded);
}
// If trying to read nothing, the minimum is empty.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read,
available_chunks,
// There is no way to read a chunk if none are available.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(0);
+ want_to_read.insert(shard_id_t(0));
EXPECT_EQ(-EIO, Isa._minimum_to_decode(want_to_read,
available_chunks,
// Reading a subset of the available chunks is always possible.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(0);
- available_chunks.insert(0);
+ want_to_read.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(0));
EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read,
available_chunks,
// chunks available.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(0);
- want_to_read.insert(1);
- available_chunks.insert(0);
+ want_to_read.insert(shard_id_t(0));
+ want_to_read.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(0));
EXPECT_EQ(-EIO, Isa._minimum_to_decode(want_to_read,
available_chunks,
// of CPU and memory.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(1);
- want_to_read.insert(3);
- available_chunks.insert(0);
- available_chunks.insert(2);
- available_chunks.insert(3);
+ want_to_read.insert(shard_id_t(1));
+ want_to_read.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(2));
+ available_chunks.insert(shard_id_t(3));
EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read,
available_chunks,
&minimum));
EXPECT_EQ(2u, minimum.size());
- EXPECT_EQ(0u, minimum.count(3));
+ EXPECT_EQ(0u, minimum.count(shard_id_t(3)));
}
}
// it is not properly aligned, it is padded with zeros.
//
bufferlist in;
- map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(Isa.get_chunk_count());
int want_to_encode[] = { 0, 1, 2, 3 };
int trail_length = 1;
in.append(string(aligned_object_size + trail_length, 'X'));
- EXPECT_EQ(0, Isa.encode(set<int>(want_to_encode, want_to_encode+4),
+ EXPECT_EQ(0, Isa.encode(shard_id_set(want_to_encode, want_to_encode+4),
in,
&encoded));
EXPECT_EQ(4u, encoded.size());
- char *last_chunk = encoded[1].c_str();
- int length =encoded[1].length();
+ char *last_chunk = encoded[shard_id_t(1)].c_str();
+ int length =encoded[shard_id_t(1)].length();
EXPECT_EQ('X', last_chunk[0]);
EXPECT_EQ('\0', last_chunk[length - trail_length]);
}
// valgrind (there is no leak).
//
bufferlist in;
- map<int,bufferlist> encoded;
- set<int> want_to_encode;
- want_to_encode.insert(0);
+ shard_id_map<bufferlist> encoded(Isa.get_chunk_count());
+ shard_id_set want_to_encode;
+ want_to_encode.insert(shard_id_t(0));
int trail_length = 1;
in.append(string(aligned_object_size + trail_length, 'X'));
EXPECT_EQ(0, Isa.encode(want_to_encode, in, &encoded));
}
bool
-DecodeAndVerify(ErasureCodeIsaDefault& Isa, map<int, bufferlist> °raded, set<int> want_to_decode, buffer::ptr* enc, int length)
+DecodeAndVerify(ErasureCodeIsaDefault& Isa, shard_id_map<bufferlist> °raded, shard_id_set want_to_decode, buffer::ptr* enc, int length)
{
- map<int, bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(Isa.get_chunk_count());
bool ok;
// decode as requested
for (int i = 0; i < (int) decoded.size(); i++) {
// compare all the buffers with their original
- ok |= memcmp(decoded[i].c_str(), enc[i].c_str(), length);
+ ok |= memcmp(decoded[shard_id_t(i)].c_str(), enc[i].c_str(), length);
}
return ok;
bufferlist in;
in.push_back(in_ptr);
- set<int>want_to_encode;
+ shard_id_set want_to_encode;
- map<int, bufferlist> encoded;
- for (int i = 0; i < (k + m); i++) {
- want_to_encode.insert(i);
+ shard_id_map<bufferlist> encoded(Isa.get_chunk_count());
+ for (shard_id_t i; i < (k + m); ++i) {
+ want_to_encode.insert(shard_id_t(i));
}
EXPECT_EQ((unsigned) (k + m), encoded.size());
- unsigned length = encoded[0].length();
+ unsigned length = encoded[shard_id_t(0)].length();
for (int i = 0; i < k; i++) {
- EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
+ EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length));
}
buffer::ptr enc[k + m];
enc[i] = newenc;
enc[i].zero();
enc[i].set_length(0);
- enc[i].append(encoded[i].c_str(), length);
+ enc[i].append(encoded[shard_id_t(i)].c_str(), length);
}
}
int cnt_cf = 0;
for (int l1 = 0; l1 < (k + m); l1++) {
- map<int, bufferlist> degraded = encoded;
- set<int> want_to_decode;
+ shard_id_map<bufferlist> degraded = encoded;
+ shard_id_set want_to_decode;
bool err;
- degraded.erase(l1);
- want_to_decode.insert(l1);
+ degraded.erase(shard_id_t(l1));
+ want_to_decode.insert(shard_id_t(l1));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l2 = l1 + 1; l2 < (k + m); l2++) {
- degraded.erase(l2);
- want_to_decode.insert(l2);
+ degraded.erase(shard_id_t(l2));
+ want_to_decode.insert(shard_id_t(l2));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l3 = l2 + 1; l3 < (k + m); l3++) {
- degraded.erase(l3);
- want_to_decode.insert(l3);
+ degraded.erase(shard_id_t(l3));
+ want_to_decode.insert(shard_id_t(l3));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l4 = l3 + 1; l4 < (k + m); l4++) {
- degraded.erase(l4);
- want_to_decode.insert(l4);
+ degraded.erase(shard_id_t(l4));
+ want_to_decode.insert(shard_id_t(l4));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
- degraded[l4] = encoded[l4];
- want_to_decode.erase(l4);
+ degraded[shard_id_t(l4)] = encoded[shard_id_t(l4)];
+ want_to_decode.erase(shard_id_t(l4));
cnt_cf++;
}
- degraded[l3] = encoded[l3];
- want_to_decode.erase(l3);
+ degraded[shard_id_t(l3)] = encoded[shard_id_t(l3)];
+ want_to_decode.erase(shard_id_t(l3));
}
- degraded[l2] = encoded[l2];
- want_to_decode.erase(l2);
+ degraded[shard_id_t(l2)] = encoded[shard_id_t(l2)];
+ want_to_decode.erase(shard_id_t(l2));
}
- degraded[l1] = encoded[l1];
- want_to_decode.erase(l1);
+ degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)];
+ want_to_decode.erase(shard_id_t(l1));
}
EXPECT_EQ(2516, cnt_cf);
EXPECT_EQ(2506, tcache.getDecodingTableCacheSize()); // 3 entries from (2,2) test and 2503 from (12,4)
bufferlist in;
in.push_back(in_ptr);
- set<int>want_to_encode;
+ shard_id_set want_to_encode;
- map<int, bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(Isa.get_chunk_count());
for (int i = 0; i < (k + m); i++) {
- want_to_encode.insert(i);
+ want_to_encode.insert(shard_id_t(i));
}
EXPECT_EQ((unsigned) (k + m), encoded.size());
- unsigned length = encoded[0].length();
+ unsigned length = encoded[shard_id_t(0)].length();
for (int i = 0; i < k; i++) {
- EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
+ EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length));
}
buffer::ptr enc[k + m];
enc[i] = newenc;
enc[i].zero();
enc[i].set_length(0);
- enc[i].append(encoded[i].c_str(), length);
+ enc[i].append(encoded[shard_id_t(i)].c_str(), length);
}
}
int cnt_cf = 0;
for (int l1 = 0; l1 < (k + m); l1++) {
- map<int, bufferlist> degraded = encoded;
- set<int> want_to_decode;
+ shard_id_map<bufferlist> degraded = encoded;
+ shard_id_set want_to_decode;
bool err;
- degraded.erase(l1);
- want_to_decode.insert(l1);
+ degraded.erase(shard_id_t(l1));
+ want_to_decode.insert(shard_id_t(l1));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l2 = l1 + 1; l2 < (k + m); l2++) {
- degraded.erase(l2);
- want_to_decode.insert(l2);
+ degraded.erase(shard_id_t(l2));
+ want_to_decode.insert(shard_id_t(l2));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l3 = l2 + 1; l3 < (k + m); l3++) {
- degraded.erase(l3);
- want_to_decode.insert(l3);
+ degraded.erase(shard_id_t(l3));
+ want_to_decode.insert(shard_id_t(l3));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l4 = l3 + 1; l4 < (k + m); l4++) {
- degraded.erase(l4);
- want_to_decode.insert(l4);
+ degraded.erase(shard_id_t(l4));
+ want_to_decode.insert(shard_id_t(l4));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
- degraded[l4] = encoded[l4];
- want_to_decode.erase(l4);
+ degraded[shard_id_t(l4)] = encoded[shard_id_t(l4)];
+ want_to_decode.erase(shard_id_t(l4));
cnt_cf++;
}
- degraded[l3] = encoded[l3];
- want_to_decode.erase(l3);
+ degraded[shard_id_t(l3)] = encoded[shard_id_t(l3)];
+ want_to_decode.erase(shard_id_t(l3));
}
- degraded[l2] = encoded[l2];
- want_to_decode.erase(l2);
+ degraded[shard_id_t(l2)] = encoded[shard_id_t(l2)];
+ want_to_decode.erase(shard_id_t(l2));
}
- degraded[l1] = encoded[l1];
- want_to_decode.erase(l1);
+ degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)];
+ want_to_decode.erase(shard_id_t(l1));
}
EXPECT_EQ(2516, cnt_cf);
EXPECT_EQ(2516, tcache.getDecodingTableCacheSize(ErasureCodeIsaDefault::kCauchy));
bufferlist in;
in.push_back(in_ptr);
- set<int>want_to_encode;
+ shard_id_set want_to_encode;
- map<int, bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(Isa.get_chunk_count());
for (int i = 0; i < (k + m); i++) {
- want_to_encode.insert(i);
+ want_to_encode.insert(shard_id_t(i));
}
EXPECT_EQ((unsigned) (k + m), encoded.size());
- unsigned length = encoded[0].length();
+ unsigned length = encoded[shard_id_t(0)].length();
for (int i = 0; i < k; i++) {
- EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
+ EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length));
}
buffer::ptr enc[k + m];
enc[i] = newenc;
enc[i].zero();
enc[i].set_length(0);
- enc[i].append(encoded[i].c_str(), length);
+ enc[i].append(encoded[shard_id_t(i)].c_str(), length);
}
}
int cnt_cf = 0;
for (int l1 = 0; l1 < (k + m); l1++) {
- map<int, bufferlist> degraded = encoded;
- set<int> want_to_decode;
+ shard_id_map<bufferlist> degraded = encoded;
+ shard_id_set want_to_decode;
bool err;
- degraded.erase(l1);
- want_to_decode.insert(l1);
+ degraded.erase(shard_id_t(l1));
+ want_to_decode.insert(shard_id_t(l1));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l2 = l1 + 1; l2 < (k + m); l2++) {
- degraded.erase(l2);
- want_to_decode.insert(l2);
+ degraded.erase(shard_id_t(l2));
+ want_to_decode.insert(shard_id_t(l2));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l3 = l2 + 1; l3 < (k + m); l3++) {
- degraded.erase(l3);
- want_to_decode.insert(l3);
+ degraded.erase(shard_id_t(l3));
+ want_to_decode.insert(shard_id_t(l3));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
for (int l4 = l3 + 1; l4 < (k + m); l4++) {
- degraded.erase(l4);
- want_to_decode.insert(l4);
+ degraded.erase(shard_id_t(l4));
+ want_to_decode.insert(shard_id_t(l4));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
- degraded[l4] = encoded[l4];
- want_to_decode.erase(l4);
+ degraded[shard_id_t(l4)] = encoded[shard_id_t(l4)];
+ want_to_decode.erase(shard_id_t(l4));
cnt_cf++;
}
- degraded[l3] = encoded[l3];
- want_to_decode.erase(l3);
+ degraded[shard_id_t(l3)] = encoded[shard_id_t(l3)];
+ want_to_decode.erase(shard_id_t(l3));
}
- degraded[l2] = encoded[l2];
- want_to_decode.erase(l2);
+ degraded[shard_id_t(l2)] = encoded[shard_id_t(l2)];
+ want_to_decode.erase(shard_id_t(l2));
}
- degraded[l1] = encoded[l1];
- want_to_decode.erase(l1);
+ degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)];
+ want_to_decode.erase(shard_id_t(l1));
}
EXPECT_EQ(6195, cnt_cf);
EXPECT_EQ(2516, tcache.getDecodingTableCacheSize(ErasureCodeIsaDefault::kCauchy));
bufferlist in;
in.push_back(in_ptr);
- set<int>want_to_encode;
+ shard_id_set want_to_encode;
- map<int, bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(Isa.get_chunk_count());
for (int i = 0; i < (k + m); i++) {
- want_to_encode.insert(i);
+ want_to_encode.insert(shard_id_t(i));
}
EXPECT_EQ((unsigned) (k + m), encoded.size());
- unsigned length = encoded[0].length();
+ unsigned length = encoded[shard_id_t(0)].length();
for (int i = 0; i < k; i++) {
- EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
+ EXPECT_EQ(0, memcmp(encoded[shard_id_t(i)].c_str(), in.c_str() + (i * length), length));
}
buffer::ptr enc[k + m];
enc[i] = newenc;
enc[i].zero();
enc[i].set_length(0);
- enc[i].append(encoded[i].c_str(), length);
+ enc[i].append(encoded[shard_id_t(i)].c_str(), length);
}
}
int cnt_cf = 0;
for (int l1 = 0; l1 < (k + m); l1++) {
- map<int, bufferlist> degraded = encoded;
- set<int> want_to_decode;
+ shard_id_map<bufferlist> degraded = encoded;
+ shard_id_set want_to_decode;
bool err;
- degraded.erase(l1);
- want_to_decode.insert(l1);
+ degraded.erase(shard_id_t(l1));
+ want_to_decode.insert(shard_id_t(l1));
err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
EXPECT_EQ(0, err);
cnt_cf++;
- degraded[l1] = encoded[l1];
- want_to_decode.erase(l1);
+ degraded[shard_id_t(l1)] = encoded[shard_id_t(l1)];
+ want_to_decode.erase(shard_id_t(l1));
}
EXPECT_EQ(5, cnt_cf);
}
bufferlist in;
in.push_back(in_ptr);
int want_to_encode[] = { 0, 1, 2, 3 };
- map<int, bufferlist> encoded;
- EXPECT_EQ(0, jerasure.encode(set<int>(want_to_encode, want_to_encode+4),
+ shard_id_map< bufferlist> encoded(jerasure.get_chunk_count());
+ EXPECT_EQ(0, jerasure.encode(shard_id_set(want_to_encode, want_to_encode+4),
in,
&encoded));
EXPECT_EQ(4u, encoded.size());
- unsigned length = encoded[0].length();
- EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), length));
- EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str() + length,
+ unsigned length = encoded[shard_id_t(0)].length();
+ EXPECT_EQ(0, memcmp(encoded[shard_id_t(0)].c_str(), in.c_str(), length));
+ EXPECT_EQ(0, memcmp(encoded[shard_id_t(1)].c_str(), in.c_str() + length,
in.length() - length));
// all chunks are available
{
int want_to_decode[] = { 0, 1 };
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, jerasure._decode(set<int>(want_to_decode, want_to_decode+2),
+ shard_id_map< bufferlist> decoded(jerasure.get_chunk_count());
+ EXPECT_EQ(0, jerasure._decode(shard_id_set(want_to_decode, want_to_decode+2),
encoded,
&decoded));
EXPECT_EQ(2u, decoded.size());
- EXPECT_EQ(length, decoded[0].length());
- EXPECT_EQ(0, memcmp(decoded[0].c_str(), in.c_str(), length));
- EXPECT_EQ(0, memcmp(decoded[1].c_str(), in.c_str() + length,
+ EXPECT_EQ(length, decoded[shard_id_t(0)].length());
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(0)].c_str(), in.c_str(), length));
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(1)].c_str(), in.c_str() + length,
in.length() - length));
}
// two chunks are missing
{
- map<int, bufferlist> degraded = encoded;
- degraded.erase(0);
- degraded.erase(1);
+ shard_id_map< bufferlist> degraded = encoded;
+ degraded.erase(shard_id_t(0));
+ degraded.erase(shard_id_t(1));
EXPECT_EQ(2u, degraded.size());
int want_to_decode[] = { 0, 1 };
- map<int, bufferlist> decoded;
- EXPECT_EQ(0, jerasure._decode(set<int>(want_to_decode, want_to_decode+2),
+ shard_id_map< bufferlist> decoded(jerasure.get_chunk_count());
+ EXPECT_EQ(0, jerasure._decode(shard_id_set(want_to_decode, want_to_decode+2),
degraded,
&decoded));
// always decode all, regardless of want_to_decode
EXPECT_EQ(4u, decoded.size());
- EXPECT_EQ(length, decoded[0].length());
- EXPECT_EQ(0, memcmp(decoded[0].c_str(), in.c_str(), length));
- EXPECT_EQ(0, memcmp(decoded[1].c_str(), in.c_str() + length,
+ EXPECT_EQ(length, decoded[shard_id_t(0)].length());
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(0)].c_str(), in.c_str(), length));
+ EXPECT_EQ(0, memcmp(decoded[shard_id_t(1)].c_str(), in.c_str() + length,
in.length() - length));
}
-
- // partial decode with the exact-sized decode_concat()
- {
- map<int, bufferlist> partial_decode = encoded;
- // we have everything but want only the first chunk
- set<int> partial_want_to_read = { 0 };
- EXPECT_EQ(1u, partial_want_to_read.size());
- bufferlist out;
- EXPECT_EQ(0, jerasure.decode_concat(partial_want_to_read,
- partial_decode,
- &out));
- EXPECT_EQ(out.length(), partial_decode[0].length());
- }
-
- // partial degraded decode with the exact-sized decode_concat()
- {
- map<int, bufferlist> partial_decode = encoded;
- // we have everything but what we really want
- partial_decode.erase(0);
- set<int> partial_want_to_read = { 0 };
- EXPECT_EQ(1u, partial_want_to_read.size());
- bufferlist out;
- EXPECT_EQ(0, jerasure.decode_concat(partial_want_to_read,
- partial_decode,
- &out));
- EXPECT_EQ(out.length(), encoded[0].length());
- }
}
}
// If trying to read nothing, the minimum is empty.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read,
available_chunks,
// There is no way to read a chunk if none are available.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(0);
+ want_to_read.insert(shard_id_t(0));
EXPECT_EQ(-EIO, jerasure._minimum_to_decode(want_to_read,
available_chunks,
// Reading a subset of the available chunks is always possible.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(0);
- available_chunks.insert(0);
+ want_to_read.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(0));
EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read,
available_chunks,
// chunks available.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(0);
- want_to_read.insert(1);
- available_chunks.insert(0);
+ want_to_read.insert(shard_id_t(0));
+ want_to_read.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(0));
EXPECT_EQ(-EIO, jerasure._minimum_to_decode(want_to_read,
available_chunks,
// of CPU and memory.
//
{
- set<int> want_to_read;
- set<int> available_chunks;
- set<int> minimum;
+ shard_id_set want_to_read;
+ shard_id_set available_chunks;
+ shard_id_set minimum;
- want_to_read.insert(1);
- want_to_read.insert(3);
- available_chunks.insert(0);
- available_chunks.insert(2);
- available_chunks.insert(3);
+ want_to_read.insert(shard_id_t(1));
+ want_to_read.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(2));
+ available_chunks.insert(shard_id_t(3));
EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read,
available_chunks,
&minimum));
EXPECT_EQ(2u, minimum.size());
- EXPECT_EQ(0u, minimum.count(3));
+ EXPECT_EQ(0u, minimum.count(shard_id_t(3)));
}
}
// it is not properly aligned, it is padded with zeros.
//
bufferlist in;
- map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(jerasure.get_chunk_count());
int want_to_encode[] = { 0, 1, 2, 3 };
int trail_length = 1;
in.append(string(aligned_object_size + trail_length, 'X'));
- EXPECT_EQ(0, jerasure.encode(set<int>(want_to_encode, want_to_encode+4),
+ EXPECT_EQ(0, jerasure.encode(shard_id_set(want_to_encode, want_to_encode+4),
in,
&encoded));
EXPECT_EQ(4u, encoded.size());
- char *last_chunk = encoded[1].c_str();
- int length =encoded[1].length();
+ char *last_chunk = encoded[shard_id_t(1)].c_str();
+ int length =encoded[shard_id_t(1)].length();
EXPECT_EQ('X', last_chunk[0]);
EXPECT_EQ('\0', last_chunk[length - trail_length]);
}
// valgrind (there is no leak).
//
bufferlist in;
- map<int,bufferlist> encoded;
- set<int> want_to_encode;
- want_to_encode.insert(0);
+ shard_id_map<bufferlist> encoded(jerasure.get_chunk_count());
+ shard_id_set want_to_encode;
+ want_to_encode.insert(shard_id_t(0));
int trail_length = 1;
in.append(string(aligned_object_size + trail_length, 'X'));
EXPECT_EQ(0, jerasure.encode(want_to_encode, in, &encoded));
"]";
profile["layers"] = description_string;
EXPECT_EQ(0, lrc.init(profile, &cerr));
- set<int> want_to_read;
- want_to_read.insert(1);
- set<int> available_chunks;
- available_chunks.insert(1);
- available_chunks.insert(2);
- set<int> minimum;
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(1));
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(2));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(want_to_read, minimum);
}
lrc.get_chunk_count());
{
// want to read the last chunk
- set<int> want_to_read;
- want_to_read.insert(lrc.get_chunk_count() - 1);
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(lrc.get_chunk_count() - 1));
// all chunks are available except the last chunk
- set<int> available_chunks;
+ shard_id_set available_chunks;
for (int i = 0; i < (int)lrc.get_chunk_count() - 1; i++)
- available_chunks.insert(i);
+ available_chunks.insert(shard_id_t(i));
// _____DDDDc can recover c
- set<int> minimum;
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
- set<int> expected_minimum;
- expected_minimum.insert(5);
- expected_minimum.insert(6);
- expected_minimum.insert(7);
- expected_minimum.insert(8);
+ shard_id_set expected_minimum;
+ expected_minimum.insert(shard_id_t(5));
+ expected_minimum.insert(shard_id_t(6));
+ expected_minimum.insert(shard_id_t(7));
+ expected_minimum.insert(shard_id_t(8));
EXPECT_EQ(expected_minimum, minimum);
}
{
- set<int> want_to_read;
- want_to_read.insert(0);
- set<int> available_chunks;
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(0));
+ shard_id_set available_chunks;
for (int i = 1; i < (int)lrc.get_chunk_count(); i++)
- available_chunks.insert(i);
- set<int> minimum;
+ available_chunks.insert(shard_id_t(i));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
- set<int> expected_minimum;
- expected_minimum.insert(2);
- expected_minimum.insert(3);
- expected_minimum.insert(4);
+ shard_id_set expected_minimum;
+ expected_minimum.insert(shard_id_t(2));
+ expected_minimum.insert(shard_id_t(3));
+ expected_minimum.insert(shard_id_t(4));
EXPECT_EQ(expected_minimum, minimum);
}
}
EXPECT_EQ(0, lrc.init(profile, &cerr));
EXPECT_EQ(profile["mapping"].length(),
lrc.get_chunk_count());
- set<int> want_to_read;
- want_to_read.insert(8);
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(8));
//
// unable to recover, too many chunks missing
//
{
- set<int> available_chunks;
- available_chunks.insert(0);
- available_chunks.insert(1);
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(1));
// missing (2)
// missing (3)
- available_chunks.insert(4);
- available_chunks.insert(5);
- available_chunks.insert(6);
+ available_chunks.insert(shard_id_t(4));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(6));
// missing (7)
// missing (8)
- set<int> minimum;
+ shard_id_set minimum;
EXPECT_EQ(-EIO, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
}
//
// _cDDD_cDD success: recovers chunk 7, 8
//
{
- set<int> available_chunks;
- available_chunks.insert(0);
- available_chunks.insert(1);
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(1));
// missing (2)
- available_chunks.insert(3);
- available_chunks.insert(4);
- available_chunks.insert(5);
- available_chunks.insert(6);
+ available_chunks.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(4));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(6));
// missing (7)
// missing (8)
- set<int> minimum;
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(available_chunks, minimum);
}
unsigned int chunk_size = g_conf().get_val<Option::size_t>("osd_pool_erasure_code_stripe_unit");
unsigned int stripe_width = lrc.get_data_chunk_count() * chunk_size;
EXPECT_EQ(chunk_size, lrc.get_chunk_size(stripe_width));
- set<int> want_to_encode;
- map<int, bufferlist> encoded;
+ shard_id_set want_to_encode;
+ shard_id_map<bufferlist> encoded(lrc.get_chunk_count());
for (unsigned int i = 0; i < lrc.get_chunk_count(); ++i) {
- want_to_encode.insert(i);
+ want_to_encode.insert(shard_id_t(i));
bufferptr ptr(buffer::create_page_aligned(chunk_size));
bufferlist tmp;
tmp.push_back(ptr);
- tmp.claim_append(encoded[i]);
- encoded[i].swap(tmp);
+ tmp.claim_append(encoded[shard_id_t(i)]);
+ encoded[shard_id_t(i)].swap(tmp);
}
- const vector<int> &mapping = lrc.get_chunk_mapping();
+ const vector<shard_id_t> &mapping = lrc.get_chunk_mapping();
char c = 'A';
for (unsigned int i = 0; i < lrc.get_data_chunk_count(); i++) {
- int j = mapping[i];
+ shard_id_t j = mapping[i];
string s(chunk_size, c);
encoded[j].clear();
encoded[j].append(s);
c++;
}
- EXPECT_EQ(0, lrc.encode_chunks(want_to_encode, &encoded));
+ shard_id_map<bufferptr> in(lrc.get_chunk_count());
+ shard_id_map<bufferptr> out(lrc.get_chunk_count());
+ for (auto&& [shard, list] : encoded) {
+ auto bp = list.begin().get_current_ptr();
+ if (shard < lrc.get_data_chunk_count()) in[shard] = bp;
+ else out[shard] = bp;
+ }
+ EXPECT_EQ(0, lrc.encode_chunks(in, out));
{
- map<int, bufferlist> chunks;
- chunks[4] = encoded[4];
- chunks[5] = encoded[5];
- chunks[6] = encoded[6];
- set<int> want_to_read;
- want_to_read.insert(7);
- set<int> available_chunks;
- available_chunks.insert(4);
- available_chunks.insert(5);
- available_chunks.insert(6);
- set<int> minimum;
+ shard_id_map<bufferlist> chunks(lrc.get_chunk_count());
+ chunks[shard_id_t(4)] = encoded[shard_id_t(4)];
+ chunks[shard_id_t(5)] = encoded[shard_id_t(5)];
+ chunks[shard_id_t(6)] = encoded[shard_id_t(6)];
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(7));
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(4));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(6));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
// only need three chunks from the second local layer
EXPECT_EQ(3U, minimum.size());
- EXPECT_EQ(1U, minimum.count(4));
- EXPECT_EQ(1U, minimum.count(5));
- EXPECT_EQ(1U, minimum.count(6));
- map<int, bufferlist> decoded;
+ EXPECT_EQ(1U, minimum.count(shard_id_t(4)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(5)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(6)));
+ shard_id_map<bufferlist> decoded(lrc.get_chunk_count());
EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
string s(chunk_size, 'D');
- EXPECT_EQ(s, string(decoded[7].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(7)].c_str(), chunk_size));
}
{
- set<int> want_to_read;
- want_to_read.insert(2);
- map<int, bufferlist> chunks;
- chunks[1] = encoded[1];
- chunks[3] = encoded[3];
- chunks[5] = encoded[5];
- chunks[6] = encoded[6];
- chunks[7] = encoded[7];
- set<int> available_chunks;
- available_chunks.insert(1);
- available_chunks.insert(3);
- available_chunks.insert(5);
- available_chunks.insert(6);
- available_chunks.insert(7);
- set<int> minimum;
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(2));
+ shard_id_map<bufferlist> chunks(lrc.get_chunk_count());
+ chunks[shard_id_t(1)] = encoded[shard_id_t(1)];
+ chunks[shard_id_t(3)] = encoded[shard_id_t(3)];
+ chunks[shard_id_t(5)] = encoded[shard_id_t(5)];
+ chunks[shard_id_t(6)] = encoded[shard_id_t(6)];
+ chunks[shard_id_t(7)] = encoded[shard_id_t(7)];
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(6));
+ available_chunks.insert(shard_id_t(7));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(5U, minimum.size());
EXPECT_EQ(available_chunks, minimum);
- map<int, bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(lrc.get_chunk_count());
EXPECT_EQ(0, lrc._decode(want_to_read, encoded, &decoded));
string s(chunk_size, 'A');
- EXPECT_EQ(s, string(decoded[2].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(2)].c_str(), chunk_size));
}
{
- set<int> want_to_read;
- want_to_read.insert(3);
- want_to_read.insert(6);
- want_to_read.insert(7);
- set<int> available_chunks;
- available_chunks.insert(0);
- available_chunks.insert(1);
- available_chunks.insert(2);
- // available_chunks.insert(3);
- available_chunks.insert(4);
- available_chunks.insert(5);
- // available_chunks.insert(6);
- // available_chunks.insert(7);
- encoded.erase(3);
- encoded.erase(6);
- set<int> minimum;
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(3));
+ want_to_read.insert(shard_id_t(6));
+ want_to_read.insert(shard_id_t(7));
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(2));
+ // available_chunks.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(4));
+ available_chunks.insert(shard_id_t(5));
+ // available_chunks.insert(shard_id_t(6));
+ // available_chunks.insert(shard_id_t(7));
+ encoded.erase(shard_id_t(3));
+ encoded.erase(shard_id_t(6));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(4U, minimum.size());
// only need two chunks from the first local layer
- EXPECT_EQ(1U, minimum.count(0));
- EXPECT_EQ(1U, minimum.count(2));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(0)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(2)));
// the above chunks will rebuild chunk 3 and the global layer only needs
// three more chunks to reach the required amount of chunks (4) to recover
// the last two
- EXPECT_EQ(1U, minimum.count(1));
- EXPECT_EQ(1U, minimum.count(2));
- EXPECT_EQ(1U, minimum.count(5));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(1)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(2)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(5)));
- map<int, bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(lrc.get_chunk_count());
EXPECT_EQ(0, lrc._decode(want_to_read, encoded, &decoded));
{
string s(chunk_size, 'B');
- EXPECT_EQ(s, string(decoded[3].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(3)].c_str(), chunk_size));
}
{
string s(chunk_size, 'C');
- EXPECT_EQ(s, string(decoded[6].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(6)].c_str(), chunk_size));
}
{
string s(chunk_size, 'D');
- EXPECT_EQ(s, string(decoded[7].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(7)].c_str(), chunk_size));
}
}
}
unsigned int chunk_size = g_conf().get_val<Option::size_t>("osd_pool_erasure_code_stripe_unit");
unsigned int stripe_width = lrc.get_data_chunk_count() * chunk_size;
EXPECT_EQ(chunk_size, lrc.get_chunk_size(stripe_width));
- set<int> want_to_encode;
- map<int, bufferlist> encoded;
+ shard_id_set want_to_encode;
+ shard_id_map<bufferlist> encoded(lrc.get_chunk_count());
for (unsigned int i = 0; i < lrc.get_chunk_count(); ++i) {
- want_to_encode.insert(i);
+ want_to_encode.insert(shard_id_t(i));
bufferptr ptr(buffer::create_page_aligned(chunk_size));
bufferlist tmp;
tmp.push_back(ptr);
- tmp.claim_append(encoded[i]);
- encoded[i].swap(tmp);
+ tmp.claim_append(encoded[shard_id_t(i)]);
+ encoded[shard_id_t(i)].swap(tmp);
}
- const vector<int> &mapping = lrc.get_chunk_mapping();
+ const vector<shard_id_t> &mapping = lrc.get_chunk_mapping();
char c = 'A';
for (unsigned int i = 0; i < lrc.get_data_chunk_count(); i++) {
- int j = mapping[i];
+ shard_id_t j = mapping[i];
string s(chunk_size, c);
encoded[j].clear();
encoded[j].append(s);
c++;
}
- EXPECT_EQ(0, lrc.encode_chunks(want_to_encode, &encoded));
+ shard_id_map<bufferptr> in(lrc.get_chunk_count());
+ shard_id_map<bufferptr> out(lrc.get_chunk_count());
+ for (auto&& [shard, list] : encoded) {
+ auto bp = list.begin().get_current_ptr();
+ if (shard < lrc.get_data_chunk_count()) in[shard] = bp;
+ else out[shard] = bp;
+ }
+ EXPECT_EQ(0, lrc.encode_chunks(in, out));
{
- set<int> want_to_read;
- want_to_read.insert(0);
- map<int, bufferlist> chunks;
- chunks[1] = encoded[1];
- chunks[3] = encoded[3];
- chunks[4] = encoded[4];
- chunks[5] = encoded[5];
- chunks[6] = encoded[6];
- chunks[7] = encoded[7];
- set<int> available_chunks;
- available_chunks.insert(1);
- available_chunks.insert(3);
- available_chunks.insert(4);
- available_chunks.insert(5);
- available_chunks.insert(6);
- available_chunks.insert(7);
- set<int> minimum;
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(0));
+ shard_id_map<bufferlist> chunks(lrc.get_chunk_count());
+ chunks[shard_id_t(1)] = encoded[shard_id_t(1)];
+ chunks[shard_id_t(3)] = encoded[shard_id_t(3)];
+ chunks[shard_id_t(4)] = encoded[shard_id_t(4)];
+ chunks[shard_id_t(5)] = encoded[shard_id_t(5)];
+ chunks[shard_id_t(6)] = encoded[shard_id_t(6)];
+ chunks[shard_id_t(7)] = encoded[shard_id_t(7)];
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(4));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(6));
+ available_chunks.insert(shard_id_t(7));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(4U, minimum.size());
- EXPECT_EQ(1U, minimum.count(1));
- EXPECT_EQ(1U, minimum.count(4));
- EXPECT_EQ(1U, minimum.count(5));
- EXPECT_EQ(1U, minimum.count(6));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(1)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(4)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(5)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(6)));
- map<int, bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(lrc.get_chunk_count());
EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
string s(chunk_size, 'A');
- EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(0)].c_str(), chunk_size));
}
{
- set<int> want_to_read;
+ shard_id_set want_to_read;
for (unsigned int i = 0; i < lrc.get_chunk_count(); i++)
- want_to_read.insert(i);
- map<int, bufferlist> chunks;
- chunks[1] = encoded[1];
- chunks[3] = encoded[3];
- chunks[5] = encoded[5];
- chunks[6] = encoded[6];
- chunks[7] = encoded[7];
- set<int> available_chunks;
- available_chunks.insert(1);
- available_chunks.insert(3);
- available_chunks.insert(5);
- available_chunks.insert(6);
- available_chunks.insert(7);
- set<int> minimum;
+ want_to_read.insert(shard_id_t(i));
+ shard_id_map<bufferlist> chunks(lrc.get_chunk_count());
+ chunks[shard_id_t(1)] = encoded[shard_id_t(1)];
+ chunks[shard_id_t(3)] = encoded[shard_id_t(3)];
+ chunks[shard_id_t(5)] = encoded[shard_id_t(5)];
+ chunks[shard_id_t(6)] = encoded[shard_id_t(6)];
+ chunks[shard_id_t(7)] = encoded[shard_id_t(7)];
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(6));
+ available_chunks.insert(shard_id_t(7));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(5U, minimum.size());
- EXPECT_EQ(1U, minimum.count(1));
- EXPECT_EQ(1U, minimum.count(3));
- EXPECT_EQ(1U, minimum.count(5));
- EXPECT_EQ(1U, minimum.count(6));
- EXPECT_EQ(1U, minimum.count(7));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(1)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(3)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(5)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(6)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(7)));
- map<int, bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(lrc.get_chunk_count());
EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
{
string s(chunk_size, 'A');
- EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(0)].c_str(), chunk_size));
}
{
string s(chunk_size, 'B');
- EXPECT_EQ(s, string(decoded[1].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(1)].c_str(), chunk_size));
}
{
string s(chunk_size, 'C');
- EXPECT_EQ(s, string(decoded[4].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(4)].c_str(), chunk_size));
}
{
string s(chunk_size, 'D');
- EXPECT_EQ(s, string(decoded[5].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(5)].c_str(), chunk_size));
}
}
{
- set<int> want_to_read;
+ shard_id_set want_to_read;
for (unsigned int i = 0; i < lrc.get_chunk_count(); i++)
- want_to_read.insert(i);
- map<int, bufferlist> chunks;
- chunks[1] = encoded[1];
- chunks[3] = encoded[3];
- chunks[5] = encoded[5];
- chunks[6] = encoded[6];
- chunks[7] = encoded[7];
- set<int> available_chunks;
- available_chunks.insert(1);
- available_chunks.insert(3);
- available_chunks.insert(5);
- available_chunks.insert(6);
- available_chunks.insert(7);
- set<int> minimum;
+ want_to_read.insert(shard_id_t(i));
+ shard_id_map<bufferlist> chunks(lrc.get_chunk_count());
+ chunks[shard_id_t(1)] = encoded[shard_id_t(1)];
+ chunks[shard_id_t(3)] = encoded[shard_id_t(3)];
+ chunks[shard_id_t(5)] = encoded[shard_id_t(5)];
+ chunks[shard_id_t(6)] = encoded[shard_id_t(6)];
+ chunks[shard_id_t(7)] = encoded[shard_id_t(7)];
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(6));
+ available_chunks.insert(shard_id_t(7));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(5U, minimum.size());
- EXPECT_EQ(1U, minimum.count(1));
- EXPECT_EQ(1U, minimum.count(3));
- EXPECT_EQ(1U, minimum.count(5));
- EXPECT_EQ(1U, minimum.count(6));
- EXPECT_EQ(1U, minimum.count(7));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(1)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(3)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(5)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(6)));
+ EXPECT_EQ(1U, minimum.count(shard_id_t(7)));
- map<int, bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(lrc.get_chunk_count());
EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
{
string s(chunk_size, 'A');
- EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(0)].c_str(), chunk_size));
}
{
string s(chunk_size, 'B');
- EXPECT_EQ(s, string(decoded[1].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(1)].c_str(), chunk_size));
}
{
string s(chunk_size, 'C');
- EXPECT_EQ(s, string(decoded[4].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(4)].c_str(), chunk_size));
}
{
string s(chunk_size, 'D');
- EXPECT_EQ(s, string(decoded[5].c_str(), chunk_size));
+ EXPECT_EQ(s, string(decoded[shard_id_t(5)].c_str(), chunk_size));
}
}
{
- set<int> want_to_read;
- want_to_read.insert(6);
- map<int, bufferlist> chunks;
- chunks[0] = encoded[0];
- chunks[1] = encoded[1];
- chunks[3] = encoded[3];
- chunks[5] = encoded[5];
- chunks[7] = encoded[7];
- set<int> available_chunks;
- available_chunks.insert(0);
- available_chunks.insert(1);
- available_chunks.insert(3);
- available_chunks.insert(5);
- available_chunks.insert(7);
- set<int> minimum;
+ shard_id_set want_to_read;
+ want_to_read.insert(shard_id_t(6));
+ shard_id_map<bufferlist> chunks(lrc.get_chunk_count());
+ chunks[shard_id_t(0)] = encoded[shard_id_t(0)];
+ chunks[shard_id_t(1)] = encoded[shard_id_t(1)];
+ chunks[shard_id_t(3)] = encoded[shard_id_t(3)];
+ chunks[shard_id_t(5)] = encoded[shard_id_t(5)];
+ chunks[shard_id_t(7)] = encoded[shard_id_t(7)];
+ shard_id_set available_chunks;
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(3));
+ available_chunks.insert(shard_id_t(5));
+ available_chunks.insert(shard_id_t(7));
+ shard_id_set minimum;
EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
EXPECT_EQ(available_chunks, minimum);
- map<int, bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(lrc.get_chunk_count());
EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
}
}
*/
#include <errno.h>
#include <stdlib.h>
-#include "arch/probe.h"
-#include "arch/intel.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
TEST_P(PluginTest,PartialRead)
{
initialize();
- set<int> want_to_encode;
- for (unsigned int i = 0 ; i < get_k_plus_m(); i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < get_k_plus_m(); ++i) {
want_to_encode.insert(i);
}
- // Test erasure code is systematic and that the data
- // order is described by get_chunk_mapping().
+ // Test erasure code is systematic and that the data order is described by
+ // get_chunk_mapping().
//
- // Create a buffer and encode it. Compare the
- // encoded shards of data with the equivalent
- // range of the buffer.
+ // Create a buffer and encode it. Compare the encoded shards of data with the
+ // equivalent range of the buffer.
//
- // If there are no differences the plugin should
- // report that it supports PARTIAL_READ_OPTIMIZATION
+ // If there are no differences the plugin should report that it supports
+ // PARTIAL_READ_OPTIMIZATION
bufferlist bl;
for (unsigned int i = 0; i < get_k(); i++) {
generate_chunk(bl);
}
- map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(get_k_plus_m());
erasure_code->encode(want_to_encode, bl, &encoded);
- std::vector<int> chunk_mapping = erasure_code->get_chunk_mapping();
+ std::vector<shard_id_t> chunk_mapping = erasure_code->get_chunk_mapping();
bool different = false;
- for (unsigned int i = 0; i < get_k_plus_m(); i++) {
+ for (shard_id_t i; i < get_k_plus_m(); ++i) {
EXPECT_EQ(chunk_size, encoded[i].length());
- unsigned int index = (chunk_mapping.size() > i) ? chunk_mapping[i] : i;
+ shard_id_t index = (chunk_mapping.size() > i) ? chunk_mapping[int(i)] : i;
if (i < get_k()) {
bufferlist expects;
- expects.substr_of(bl, i * chunk_size, chunk_size);
+ expects.substr_of(bl, int(i) * chunk_size, chunk_size);
if (expects != encoded[index]) {
different = true;
}
TEST_P(PluginTest,PartialWrite)
{
initialize();
- set<int> want_to_encode;
- for (unsigned int i = 0 ; i < get_k_plus_m(); i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < get_k_plus_m(); ++i) {
want_to_encode.insert(i);
}
// Test erasure code can perform partial writes
//
- // Create buffer 1 that consists of 3 randomly
- // generated chunks for each shard
+ // Create buffer 1 that consists of 3 randomly generated chunks for each shard
//
- // Create buffer 2 that has a different middle
- // chunk for each shard
+ // Create buffer 2 that has a different middle chunk for each shard
//
- // Create buffer 3 that just has the 1 different
- // middle chunk for each shard
+ // Create buffer 3 that just has the 1 different middle chunk for each shard
//
- // encoded the 3 buffers. Check if the first and
- // last chunk of encoded shard buffer 1 and 2 are
- // the same. Check if the midle chunk of encoded
- // shard buffer 2 is the same as encoded shard
- // buffer 3.
+ // encoded the 3 buffers. Check if the first and last chunk of encoded shard
+ // buffer 1 and 2 are the same. Check if the midle chunk of encoded shard
+ // buffer 2 is the same as encoded shard buffer 3.
//
- // If there are no differences the plugin should
- // report that it supports PARTIAL_WRITE_OPTIMIZATION
+ // If there are no differences the plugin should report that it supports
+ // PARTIAL_WRITE_OPTIMIZATION
bufferlist bl1;
bufferlist bl2;
bufferlist bl3;
bl2.append(b3);
bl3.append(c2);
}
- map<int,bufferlist> encoded1;
+ shard_id_map<bufferlist> encoded1(get_k_plus_m());
erasure_code->encode(want_to_encode, bl1, &encoded1);
- map<int,bufferlist> encoded2;
+ shard_id_map<bufferlist> encoded2(get_k_plus_m());
erasure_code->encode(want_to_encode, bl2, &encoded2);
- map<int,bufferlist> encoded3;
+ shard_id_map<bufferlist> encoded3(get_k_plus_m());
erasure_code->encode(want_to_encode, bl3, &encoded3);
bool different = false;
- for (unsigned int i = 0; i < get_k_plus_m(); i++) {
+ for (shard_id_t i; i < get_k_plus_m(); ++i) {
EXPECT_EQ(chunk_size*3, encoded1[i].length());
EXPECT_EQ(chunk_size*3, encoded2[i].length());
EXPECT_EQ(chunk_size, encoded3[i].length());
TEST_P(PluginTest,ZeroInZeroOut)
{
initialize();
- set<int> want_to_encode;
- for (unsigned int i = 0 ; i < get_k_plus_m(); i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < get_k_plus_m(); ++i) {
want_to_encode.insert(i);
}
// Test erasure code generates zeros for coding parity if data chunks are zeros
for (unsigned int i = 0; i < get_k(); i++) {
generate_chunk(bl, 0);
}
- map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(get_k_plus_m());
erasure_code->encode(want_to_encode, bl, &encoded);
bool different = false;
bufferlist expects;
generate_chunk(expects, 0);
- for (unsigned int i = 0; i < get_k_plus_m(); i++) {
+ for (shard_id_t i; i < get_k_plus_m(); ++i) {
EXPECT_EQ(chunk_size, encoded[i].length());
if (expects != encoded[i]) {
different = true;
// 3. Test that EncodeDelta generates the expected delta when given the
// original data chunk and the new data chunk.
// 4. Do a second full write with the new chunk.
- // 5. Test that ApplyDelta correctly applies the delta to the original parity chunk
- // and returns the same new parity chunk as the second full write.
+ // 5. Test that ApplyDelta correctly applies the delta to the original parity
+ // chunk and returns the same new parity chunk as the second full write.
initialize();
if (!(erasure_code->get_supported_optimizations() &
ErasureCodeInterface::FLAG_EC_PLUGIN_PARITY_DELTA_OPTIMIZATION)) {
GTEST_SKIP() << "Plugin does not support parity delta optimization";
}
- set<int> want_to_encode;
- for (unsigned int i = 0 ; i < get_k_plus_m(); i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i ; i < get_k_plus_m(); ++i) {
want_to_encode.insert(i);
}
bufferlist old_bl;
- for (unsigned int i = 0; i < get_k(); i++) {
+ for (unsigned int i = 0; i < get_k(); ++i) {
generate_chunk(old_bl);
}
- map<int,bufferlist> old_encoded;
+ shard_id_map<bufferlist> old_encoded(get_k_plus_m());
erasure_code->encode(want_to_encode, old_bl, &old_encoded);
bufferlist new_chunk_bl;
random_device rand;
mt19937 gen(rand());
uniform_int_distribution<> chunk_range(0, get_k()-1);
- unsigned int random_chunk = chunk_range(gen);
+ shard_id_t random_chunk(chunk_range(gen));
ceph::bufferptr old_data = buffer::create_aligned(chunk_size, 4096);
- old_bl.begin(random_chunk * chunk_size).copy(chunk_size, old_data.c_str());
+ old_bl.begin(int(random_chunk) * chunk_size).copy(chunk_size, old_data.c_str());
ceph::bufferptr new_data = new_chunk_bl.front();
ceph::bufferptr delta = buffer::create_aligned(chunk_size, 4096);
ceph::bufferptr expected_delta = buffer::create_aligned(chunk_size, 4096);
EXPECT_EQ(delta_matches, true);
uniform_int_distribution<> parity_range(get_k(), get_k_plus_m()-1);
- unsigned int random_parity = parity_range(gen);
+ shard_id_t random_parity(parity_range(gen));
ceph::bufferptr old_parity = buffer::create_aligned(chunk_size, 4096);
old_encoded[random_parity].begin(0).copy(chunk_size, old_parity.c_str());
- map<int,bufferlist> new_encoded;
+ shard_id_map<bufferlist> new_encoded(get_k_plus_m());
bufferlist new_bl;
for (auto i = old_encoded.begin(); i != old_encoded.end(); i++) {
if ((unsigned int)i->first >= get_k()) {
continue;
}
- if ((unsigned int)i->first == random_chunk) {
+ if (i->first == random_chunk) {
new_bl.append(new_data);
}
else {
ceph::bufferptr expected_parity = buffer::create_aligned(chunk_size, 4096);
new_encoded[random_parity].begin().copy_deep(chunk_size, expected_parity);
- map <int, bufferptr> in_map;
+ shard_id_map<bufferptr> in_map(get_k_plus_m());
in_map[random_chunk] = delta;
in_map[random_parity] = old_parity;
- map <int, bufferptr> out_map;
+ shard_id_map<bufferptr> out_map(get_k_plus_m());
out_map[random_parity] = old_parity;
- erasure_code->apply_delta((const map<int, bufferptr>)in_map, out_map);
+ erasure_code->apply_delta(in_map, out_map);
bool parity_matches = true;
for (int i = 0; i < chunk_size; i++) {
ErasureCodeInterface::FLAG_EC_PLUGIN_PARITY_DELTA_OPTIMIZATION)) {
GTEST_SKIP() << "Plugin does not support parity delta optimization";
}
- set<int> want_to_encode;
- for (unsigned int i = 0 ; i < get_k_plus_m(); i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i ; i < get_k_plus_m(); ++i) {
want_to_encode.insert(i);
}
for (unsigned int i = 0; i < get_k(); i++) {
generate_chunk(old_bl);
}
- map<int,bufferlist> old_encoded;
+ shard_id_map<bufferlist> old_encoded(get_k_plus_m());
erasure_code->encode(want_to_encode, old_bl, &old_encoded);
bufferlist new_bl;
for (unsigned int i = 0; i < get_k(); i++) {
generate_chunk(new_bl);
}
- map<int,bufferlist> new_encoded;
+ shard_id_map<bufferlist> new_encoded(get_k_plus_m());
erasure_code->encode(want_to_encode, new_bl, &new_encoded);
ceph::bufferptr old_data = buffer::create_aligned(chunk_size*get_k(), 4096);
}
EXPECT_EQ(delta_matches, true);
- map <int, bufferptr> in_map;
- map <int, bufferptr> out_map;
- for (unsigned int i = 0; i < get_k(); i++) {
+ shard_id_map<bufferptr> in_map(get_k_plus_m());
+ shard_id_map<bufferptr> out_map(get_k_plus_m());
+ for (shard_id_t i; i < get_k(); ++i) {
ceph::bufferptr tmp = buffer::create_aligned(chunk_size, 4096);
- delta.copy_out(chunk_size * i, chunk_size, tmp.c_str());
+ delta.copy_out(chunk_size * int(i), chunk_size, tmp.c_str());
in_map[i] = tmp;
}
- for (unsigned int i = get_k(); i < get_k_plus_m(); i++) {
+ for (shard_id_t i(get_k()); i < get_k_plus_m(); ++i) {
ceph::bufferptr tmp = buffer::create_aligned(chunk_size, 4096);
old_encoded[i].begin().copy(chunk_size, tmp.c_str());
in_map[i] = tmp;
out_map[i] = tmp;
}
- erasure_code->apply_delta((const map<int, bufferptr>)in_map, out_map);
+ erasure_code->apply_delta(in_map, out_map);
bool parity_matches = true;
- for (unsigned int i = get_k(); i < get_k_plus_m(); i++) {
+ for (shard_id_t i(get_k()); i < get_k_plus_m(); ++i) {
for (int j = 0; j < chunk_size; j++) {
if (out_map[i].c_str()[j] != new_encoded[i].c_str()[j]) {
parity_matches = false;
"plugin=jerasure technique=liber8tion k=4 m=2 packetsize=32",
"plugin=jerasure technique=liber8tion k=5 m=2 packetsize=32",
"plugin=jerasure technique=liber8tion k=6 m=2 packetsize=32",
- "plugin=clay k=2 m=1",
- "plugin=clay k=3 m=1",
- "plugin=clay k=4 m=1",
- "plugin=clay k=5 m=1",
- "plugin=clay k=6 m=1",
- "plugin=clay k=2 m=2",
- "plugin=clay k=3 m=2",
- "plugin=clay k=4 m=2",
- "plugin=clay k=5 m=2",
- "plugin=clay k=6 m=2",
- "plugin=clay k=2 m=3",
- "plugin=clay k=3 m=3",
- "plugin=clay k=4 m=3",
- "plugin=clay k=5 m=3",
- "plugin=clay k=6 m=3",
+ // Disabling clay for now. Needs more testing with optimized EC.
+ // "plugin=clay k=2 m=1",
+ // "plugin=clay k=3 m=1",
+ // "plugin=clay k=4 m=1",
+ // "plugin=clay k=5 m=1",
+ // "plugin=clay k=6 m=1",
+ // "plugin=clay k=2 m=2",
+ // "plugin=clay k=3 m=2",
+ // "plugin=clay k=4 m=2",
+ // "plugin=clay k=5 m=2",
+ // "plugin=clay k=6 m=2",
+ // "plugin=clay k=2 m=3",
+ // "plugin=clay k=3 m=3",
+ // "plugin=clay k=4 m=3",
+ // "plugin=clay k=5 m=3",
+ // "plugin=clay k=6 m=3",
"plugin=shec technique=single k=2 m=1 c=1",
"plugin=shec technique=single k=3 m=1 c=1",
"plugin=shec technique=single k=4 m=1 c=1",
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
+ shard_id_set minimum_chunks;
for (int i = 0; i < 8; ++i) {
- want_to_decode.insert(i);
+ want_to_decode.insert(shard_id_t(i));
}
for (int i = 0; i < 5; ++i) {
- available_chunks.insert(i);
+ available_chunks.insert(shard_id_t(i));
}
int r = shec->_minimum_to_decode(want_to_decode, available_chunks,
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
+ shard_id_set minimum_chunks;
for (int i = 0; i < 4; ++i) {
- want_to_decode.insert(i);
+ want_to_decode.insert(shard_id_t(i));
}
for (int i = 0; i < 8; ++i) {
- available_chunks.insert(i);
+ available_chunks.insert(shard_id_t(i));
}
int r = shec->_minimum_to_decode(want_to_decode, available_chunks,
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
+ shard_id_set minimum_chunks;
for (int i = 0; i < 7; ++i) {
- want_to_decode.insert(i);
+ want_to_decode.insert(shard_id_t(i));
}
for (int i = 4; i < 7; ++i) {
- available_chunks.insert(i);
+ available_chunks.insert(shard_id_t(i));
}
int r = shec->_minimum_to_decode(want_to_decode, available_chunks,
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
+ shard_id_set minimum_chunks;
for (int i = 0; i < 5; ++i) {
- want_to_decode.insert(i);
+ want_to_decode.insert(shard_id_t(i));
}
for (int i = 4; i < 7; ++i) {
- available_chunks.insert(i);
+ available_chunks.insert(shard_id_t(i));
}
int r = shec->_minimum_to_decode(want_to_decode, available_chunks,
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
//minimum_chunks is NULL
for (int i = 0; i < 7; ++i) {
- want_to_decode.insert(i);
- available_chunks.insert(i);
+ want_to_decode.insert(shard_id_t(i));
+ available_chunks.insert(shard_id_t(i));
}
int r = shec->_minimum_to_decode(want_to_decode, available_chunks, NULL);
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
- set<int> minimum_chunks, minimum;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
+ shard_id_set minimum_chunks, minimum;
for (int i = 0; i < 7; ++i) {
- want_to_decode.insert(i);
- available_chunks.insert(i);
+ want_to_decode.insert(shard_id_t(i));
+ available_chunks.insert(shard_id_t(i));
}
shec->_minimum_to_decode(want_to_decode, available_chunks, &minimum_chunks);
minimum = minimum_chunks; //normal value
for (int i = 100; i < 120; ++i) {
- minimum_chunks.insert(i); //insert extra data
+ minimum_chunks.insert(shard_id_t(i)); //insert extra data
}
int r = shec->_minimum_to_decode(want_to_decode, available_chunks,
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
+ shard_id_set minimum_chunks;
- want_to_decode.insert(0);
- available_chunks.insert(0);
- available_chunks.insert(1);
- available_chunks.insert(2);
+ want_to_decode.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(2));
int r = shec->_minimum_to_decode(want_to_decode, available_chunks,
&minimum_chunks);
shec->init(*profile, &cerr);
//minimum_to_decode
- set<int> want_to_decode;
- set<int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_set available_chunks;
+ shard_id_set minimum_chunks;
- want_to_decode.insert(0);
- want_to_decode.insert(2);
- available_chunks.insert(0);
- available_chunks.insert(1);
- available_chunks.insert(2);
- available_chunks.insert(3);
+ want_to_decode.insert(shard_id_t(0));
+ want_to_decode.insert(shard_id_t(2));
+ available_chunks.insert(shard_id_t(0));
+ available_chunks.insert(shard_id_t(1));
+ available_chunks.insert(shard_id_t(2));
+ available_chunks.insert(shard_id_t(3));
pthread_t tid;
g_flag = 0;
shec->init(*profile, &cerr);
//minimum_to_decode_with_cost
- set<int> want_to_decode;
- map<int, int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_map<int> available_chunks(shec->get_chunk_count());
+ shard_id_set minimum_chunks;
for (int i = 0; i < 7; ++i) {
- want_to_decode.insert(i);
- available_chunks.insert(make_pair(i, i));
+ want_to_decode.insert(shard_id_t(i));
+ available_chunks.insert(shard_id_t(i), i);
}
int r = shec->minimum_to_decode_with_cost(want_to_decode, available_chunks,
shec->init(*profile, &cerr);
//minimum_to_decode_with_cost
- set<int> want_to_decode;
- map<int, int> available_chunks;
- set<int> minimum_chunks;
+ shard_id_set want_to_decode;
+ shard_id_map<int> available_chunks(shec->get_chunk_count());
+ shard_id_set minimum_chunks;
- want_to_decode.insert(0);
- want_to_decode.insert(2);
- available_chunks[0] = 0;
- available_chunks[1] = 1;
- available_chunks[2] = 2;
- available_chunks[3] = 3;
+ want_to_decode.insert(shard_id_t(0));
+ want_to_decode.insert(shard_id_t(2));
+ available_chunks[shard_id_t(0)] = 0;
+ available_chunks[shard_id_t(1)] = 1;
+ available_chunks[shard_id_t(2)] = 2;
+ available_chunks[shard_id_t(3)] = 3;
pthread_t tid;
g_flag = 0;
delete profile;
}
+IGNORE_DEPRECATED
TEST(ErasureCodeShec, encode_1)
{
//init
return NULL;
}
+END_IGNORE_DEPRECATED
\ No newline at end of file
int k;
int m;
int c;
- set<int> want;
- set<int> avail;
+ shard_id_set want;
+ shard_id_set avail;
};
struct std::vector<Recover_d> cannot_recover;
};
+IGNORE_DEPRECATED
TEST_P(ParameterTest, parameter_all)
{
int result;
//minimum_to_decode
//want_to_decode will be a combination that chooses 1~c from k+m
- set<int> want_to_decode, available_chunks, minimum_chunks;
+ shard_id_set want_to_decode, available_chunks, minimum_chunks;
int array_want_to_decode[shec->get_chunk_count()];
struct Recover_d comb;
do {
for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
- available_chunks.insert(i);
+ available_chunks.insert(shard_id_t(i));
}
for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
if (array_want_to_decode[i]) {
- want_to_decode.insert(i);
- available_chunks.erase(i);
+ want_to_decode.insert(shard_id_t(i));
+ available_chunks.erase(shard_id_t(i));
}
}
}
//minimum_to_decode_with_cost
- set<int> want_to_decode_with_cost, minimum_chunks_with_cost;
- map<int, int> available_chunks_with_cost;
+ shard_id_set want_to_decode_with_cost, minimum_chunks_with_cost;
+ shard_id_map<int> available_chunks_with_cost(shec->get_chunk_count());
for (unsigned int i = 0; i < 1; i++) {
- want_to_decode_with_cost.insert(i);
+ want_to_decode_with_cost.insert(shard_id_t(i));
}
for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
- available_chunks_with_cost[i] = i;
+ available_chunks_with_cost[shard_id_t(i)] = i;
}
result = shec->minimum_to_decode_with_cost(
delete profile;
delete crush;
}
+END_IGNORE_DEPRECATED
INSTANTIATE_TEST_SUITE_P(Test, ParameterTest, ::testing::ValuesIn(param));
unsigned int unexpected_count = 0;
unsigned int value_count = 0;
-map<set<int>,set<set<int> > > shec_table;
+map<shard_id_set,set<shard_id_set>> shec_table;
constexpr int getint(std::initializer_list<int> is) {
int a = 0;
}
void create_table_shec432() {
- set<int> table_key,vec_avails;
- set<set<int> > table_value;
+ shard_id_set table_key, vec_avails;
+ set<shard_id_set> table_value;
for (int want_count = 0; want_count < 7; ++want_count) {
for (unsigned want = 1; want < (1<<7); ++want) {
{
for (int i = 0; i < 7; ++i) {
if (want & (1 << i)) {
- table_key.insert(i);
+ table_key.insert(shard_id_t(i));
}
}
}
vec_avails.clear();
for (int j = 0; j < 7; ++j) {
if (vec[i] & (1 << j)) {
- vec_avails.insert(j);
+ vec_avails.insert(shard_id_t(j));
}
}
table_value.insert(vec_avails);
}
}
-bool search_table_shec432(set<int> want_to_read, set<int> available_chunks) {
- set<set<int> > tmp;
- set<int> settmp;
+bool search_table_shec432(shard_id_set want_to_read, shard_id_set available_chunks) {
+ set<shard_id_set > tmp;
+ shard_id_set settmp;
bool found;
tmp = shec_table.find(want_to_read)->second;
- for (set<set<int> >::iterator itr = tmp.begin();itr != tmp.end(); ++itr) {
+ for (set<shard_id_set >::iterator itr = tmp.begin();itr != tmp.end(); ++itr) {
found = true;
value_count = 0;
settmp = *itr;
- for (set<int>::iterator setitr = settmp.begin();setitr != settmp.end(); ++setitr) {
+ for (shard_id_set::const_iterator setitr = settmp.begin();setitr != settmp.end(); ++setitr) {
if (!available_chunks.count(*setitr)) {
found = false;
}
return false;
}
+IGNORE_DEPRECATED
TEST(ParameterTest, combination_all)
{
const unsigned int kObjectSize = 128;
delete shec;
delete profile;
}
+END_IGNORE_DEPRECATED
int main(int argc, char **argv)
{
pthread_join(tid5, NULL);
}
+IGNORE_DEPRECATED
void* thread1(void* pParam)
{
TestParam* param = static_cast<TestParam*>(pParam);
return NULL;
}
+END_IGNORE_DEPRECATED
#include "erasure-code/ErasureCode.h"
#include "ceph_erasure_code_benchmark.h"
-using std::endl;
using std::cerr;
using std::cout;
using std::map;
std::vector<std::string> strs;
boost::split(strs, *i, boost::is_any_of("="));
if (strs.size() != 2) {
- cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl;
+ cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << std::endl;
} else {
profile[strs[0]] = strs[1];
}
m = stoi(profile["m"]);
} catch (const std::logic_error& e) {
cout << "Invalid k and/or m: k=" << profile["k"] << ", m=" << profile["m"]
- << " (" << e.what() << ")" << endl;
+ << " (" << e.what() << ")" << std::endl;
return -EINVAL;
}
if (k <= 0) {
- cout << "parameter k is " << k << ". But k needs to be > 0." << endl;
+ cout << "parameter k is " << k << ". But k needs to be > 0." << std::endl;
return -EINVAL;
} else if ( m < 0 ) {
- cout << "parameter m is " << m << ". But m needs to be >= 0." << endl;
+ cout << "parameter m is " << m << ". But m needs to be >= 0." << std::endl;
return -EINVAL;
}
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
- cerr << messages.str() << endl;
+ cerr << messages.str() << std::endl;
return code;
}
bufferlist in;
in.append(string(in_size, 'X'));
in.rebuild_aligned(ErasureCode::SIMD_ALIGN);
- set<int> want_to_encode;
- for (int i = 0; i < k + m; i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < k + m; ++i) {
want_to_encode.insert(i);
}
utime_t begin_time = ceph_clock_now();
for (int i = 0; i < max_iterations; i++) {
- std::map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(erasure_code->get_chunk_count());
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
}
utime_t end_time = ceph_clock_now();
- cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
+ cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << std::endl;
return 0;
}
-static void display_chunks(const map<int,bufferlist> &chunks,
+static void display_chunks(const shard_id_map<bufferlist> &chunks,
unsigned int chunk_count) {
cout << "chunks ";
- for (unsigned int chunk = 0; chunk < chunk_count; chunk++) {
+ for (shard_id_t chunk; chunk < chunk_count; ++chunk) {
if (chunks.count(chunk) == 0) {
cout << "(" << chunk << ")";
} else {
}
cout << " ";
}
- cout << "(X) is an erased chunk" << endl;
+ cout << "(X) is an erased chunk" << std::endl;
}
-int ErasureCodeBench::decode_erasures(const map<int,bufferlist> &all_chunks,
- const map<int,bufferlist> &chunks,
- unsigned i,
+int ErasureCodeBench::decode_erasures(const shard_id_map<bufferlist> &all_chunks,
+ const shard_id_map<bufferlist> &chunks,
+ shard_id_t shard,
unsigned want_erasures,
ErasureCodeInterfaceRef erasure_code)
{
if (want_erasures == 0) {
if (verbose)
display_chunks(chunks, erasure_code->get_chunk_count());
- set<int> want_to_read;
- for (unsigned int chunk = 0; chunk < erasure_code->get_chunk_count(); chunk++)
+ shard_id_set want_to_read;
+ for (shard_id_t chunk; chunk < erasure_code->get_chunk_count(); ++chunk)
if (chunks.count(chunk) == 0)
want_to_read.insert(chunk);
- map<int,bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(erasure_code->get_chunk_count());
code = erasure_code->decode(want_to_read, chunks, &decoded, 0);
if (code)
return code;
- for (set<int>::iterator chunk = want_to_read.begin();
+ for (shard_id_set::const_iterator chunk = want_to_read.begin();
chunk != want_to_read.end();
++chunk) {
if (all_chunks.find(*chunk)->second.length() != decoded[*chunk].length()) {
cerr << "chunk " << *chunk << " length=" << all_chunks.find(*chunk)->second.length()
- << " decoded with length=" << decoded[*chunk].length() << endl;
+ << " decoded with length=" << decoded[*chunk].length() << std::endl;
return -1;
}
bufferlist tmp = all_chunks.find(*chunk)->second;
if (!tmp.contents_equal(decoded[*chunk])) {
cerr << "chunk " << *chunk
- << " content and recovered content are different" << endl;
+ << " content and recovered content are different" << std::endl;
return -1;
}
}
return 0;
}
- for (; i < erasure_code->get_chunk_count(); i++) {
- map<int,bufferlist> one_less = chunks;
- one_less.erase(i);
- code = decode_erasures(all_chunks, one_less, i + 1, want_erasures - 1, erasure_code);
+ for (; shard < erasure_code->get_chunk_count(); ++shard) {
+ shard_id_map<bufferlist> one_less = chunks;
+ one_less.erase(shard);
+ code = decode_erasures(all_chunks, one_less, shard + 1, want_erasures - 1, erasure_code);
if (code)
return code;
}
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
- cerr << messages.str() << endl;
+ cerr << messages.str() << std::endl;
return code;
}
in.append(string(in_size, 'X'));
in.rebuild_aligned(ErasureCode::SIMD_ALIGN);
- set<int> want_to_encode;
- for (int i = 0; i < k + m; i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < k + m; ++i) {
want_to_encode.insert(i);
}
- map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(erasure_code->get_chunk_count());
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
- set<int> want_to_read = want_to_encode;
+ shard_id_set want_to_read = want_to_encode;
if (erased.size() > 0) {
for (vector<int>::const_iterator i = erased.begin();
i != erased.end();
++i)
- encoded.erase(*i);
+ encoded.erase(shard_id_t(*i));
display_chunks(encoded, erasure_code->get_chunk_count());
}
utime_t begin_time = ceph_clock_now();
for (int i = 0; i < max_iterations; i++) {
if (exhaustive_erasures) {
- code = decode_erasures(encoded, encoded, 0, erasures, erasure_code);
+ code = decode_erasures(encoded, encoded, shard_id_t(0), erasures, erasure_code);
if (code)
return code;
} else if (erased.size() > 0) {
- map<int,bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(erasure_code->get_chunk_count());
code = erasure_code->decode(want_to_read, encoded, &decoded, 0);
if (code)
return code;
} else {
- map<int,bufferlist> chunks = encoded;
+ shard_id_map<bufferlist> chunks = encoded;
for (int j = 0; j < erasures; j++) {
int erasure;
do {
erasure = rand() % ( k + m );
- } while(chunks.count(erasure) == 0);
- chunks.erase(erasure);
+ } while(chunks.count(shard_id_t(erasure)) == 0);
+ chunks.erase(shard_id_t(erasure));
}
- map<int,bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(erasure_code->get_chunk_count());
code = erasure_code->decode(want_to_read, chunks, &decoded, 0);
if (code)
return code;
}
}
utime_t end_time = ceph_clock_now();
- cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
+ cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << std::endl;
return 0;
}
return err;
return ecbench.run();
} catch(po::error &e) {
- cerr << e.what() << endl;
+ cerr << e.what() << std::endl;
return 1;
}
}
public:
int setup(int argc, char** argv);
int run();
- int decode_erasures(const std::map<int, ceph::buffer::list> &all_chunks,
- const std::map<int, ceph::buffer::list> &chunks,
- unsigned i,
+ int decode_erasures(const shard_id_map<ceph::buffer::list> &all_chunks,
+ const shard_id_map<ceph::buffer::list> &chunks,
+ shard_id_t shard,
unsigned want_erasures,
ErasureCodeInterfaceRef erasure_code);
int decode();
int run_create();
int run_check();
int decode_erasures(ErasureCodeInterfaceRef erasure_code,
- set<int> erasures,
- map<int,bufferlist> chunks);
+ shard_id_set erasures,
+ shard_id_map<bufferlist> chunks);
string content_path();
- string chunk_path(unsigned int chunk);
+ string chunk_path(shard_id_t chunk);
};
int ErasureCodeNonRegression::setup(int argc, char** argv) {
create = vm.count("create") > 0;
if (!check && !create) {
- cerr << "must specifify either --check, or --create" << endl;
+ cerr << "must specifify either --check, or --create" << std::endl;
return 1;
}
std::vector<std::string> strs;
boost::split(strs, *i, boost::is_any_of("="));
if (strs.size() != 2) {
- cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl;
+ cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << std::endl;
} else {
profile[strs[0]] = strs[1];
}
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
- cerr << messages.str() << endl;
+ cerr << messages.str() << std::endl;
return code;
}
if (::mkdir(directory.c_str(), 0755)) {
- cerr << "mkdir(" << directory << "): " << cpp_strerror(errno) << endl;
+ cerr << "mkdir(" << directory << "): " << cpp_strerror(errno) << std::endl;
return 1;
}
unsigned payload_chunk_size = 37;
in.splice(stripe_width, in.length() - stripe_width);
if (in.write_file(content_path().c_str()))
return 1;
- set<int> want_to_encode;
- for (unsigned int i = 0; i < erasure_code->get_chunk_count(); i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < erasure_code->get_chunk_count(); ++i) {
want_to_encode.insert(i);
}
- map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(erasure_code->get_chunk_count());
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
- for (map<int,bufferlist>::iterator chunk = encoded.begin();
+ for (shard_id_map<bufferlist>::iterator chunk = encoded.begin();
chunk != encoded.end();
++chunk) {
if (chunk->second.write_file(chunk_path(chunk->first).c_str()))
}
int ErasureCodeNonRegression::decode_erasures(ErasureCodeInterfaceRef erasure_code,
- set<int> erasures,
- map<int,bufferlist> chunks)
+ shard_id_set erasures,
+ shard_id_map<bufferlist> chunks)
{
- map<int,bufferlist> available;
- for (map<int,bufferlist>::iterator chunk = chunks.begin();
+ shard_id_map<bufferlist> available(erasure_code->get_chunk_count());
+ for (shard_id_map<bufferlist>::iterator chunk = chunks.begin();
chunk != chunks.end();
++chunk) {
if (erasures.count(chunk->first) == 0)
available[chunk->first] = chunk->second;
}
- map<int,bufferlist> decoded;
+ shard_id_map<bufferlist> decoded(erasure_code->get_chunk_count());
int code = erasure_code->decode(erasures, available, &decoded, available.begin()->second.length());
if (code)
return code;
- for (set<int>::iterator erasure = erasures.begin();
+ for (shard_id_set::const_iterator erasure = erasures.begin();
erasure != erasures.end();
++erasure) {
if (!chunks[*erasure].contents_equal(decoded[*erasure])) {
- cerr << "chunk " << *erasure << " incorrectly recovered" << endl;
+ cerr << "chunk " << *erasure << " incorrectly recovered" << std::endl;
return 1;
}
}
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
- cerr << messages.str() << endl;
+ cerr << messages.str() << std::endl;
return code;
}
string errors;
bufferlist in;
if (in.read_file(content_path().c_str(), &errors)) {
- cerr << errors << endl;
+ cerr << errors << std::endl;
return 1;
}
- set<int> want_to_encode;
- for (unsigned int i = 0; i < erasure_code->get_chunk_count(); i++) {
+ shard_id_set want_to_encode;
+ for (shard_id_t i; i < erasure_code->get_chunk_count(); ++i) {
want_to_encode.insert(i);
}
- map<int,bufferlist> encoded;
+ shard_id_map<bufferlist> encoded(erasure_code->get_chunk_count());
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
- for (map<int,bufferlist>::iterator chunk = encoded.begin();
+ for (shard_id_map<bufferlist>::iterator chunk = encoded.begin();
chunk != encoded.end();
++chunk) {
bufferlist existing;
if (existing.read_file(chunk_path(chunk->first).c_str(), &errors)) {
- cerr << errors << endl;
+ cerr << errors << std::endl;
return 1;
}
bufferlist &old = chunk->second;
if (existing.length() != old.length() ||
memcmp(existing.c_str(), old.c_str(), old.length())) {
- cerr << "chunk " << chunk->first << " encodes differently" << endl;
+ cerr << "chunk " << chunk->first << " encodes differently" << std::endl;
return 1;
}
}
// erasing a single chunk is likely to use a specific code path in every plugin
- set<int> erasures;
+ shard_id_set erasures;
erasures.clear();
- erasures.insert(0);
+ erasures.insert(shard_id_t());
code = decode_erasures(erasure_code, erasures, encoded);
if (code)
return code;
if (erasure_code->get_chunk_count() - erasure_code->get_data_chunk_count() > 1) {
// erasing two chunks is likely to be the general case
erasures.clear();
- erasures.insert(0);
- erasures.insert(erasure_code->get_chunk_count() - 1);
+ erasures.insert(shard_id_t());
+ erasures.insert(shard_id_t(erasure_code->get_chunk_count() - 1));
code = decode_erasures(erasure_code, erasures, encoded);
if (code)
return code;
return path.str();
}
-string ErasureCodeNonRegression::chunk_path(unsigned int chunk)
+string ErasureCodeNonRegression::chunk_path(shard_id_t chunk)
{
stringstream path;
path << directory << "/" << chunk;
std::lock_guard l{instance.lock};
int r = instance.load(
args[0], g_conf().get_val<std::string>("erasure_code_dir"), &plugin, &ss);
- std::cerr << ss.str() << endl;
+ std::cerr << ss.str() << std::endl;
return r;
}
return 1;
}
auto chunk = static_cast<ssize_t>(chunk_mapping.size()) > shard ?
- chunk_mapping[shard] : shard;
- want_to_read.insert(chunk);
+ chunk_mapping[shard] : shard_id_t(shard);
+ want_to_read.insert(static_cast<int>(chunk));
}
r = ECUtil::decode(*sinfo, ec_impl, want_to_read, encoded_data, &decoded_data);