=======================
- librbd Cache Settings
+ librbd Settings
=======================
See `Block Device`_ for additional details.
+Cache Settings
+=======================
+
.. sidebar:: Kernel Caching
The kernel driver for Ceph block devices can use the Linux page cache to
:Default: ``true``
.. _Block Device: ../../rbd/rbd/
+
+
+Read-ahead Settings
+=======================
+
+.. versionadded:: 0.86
+
+RBD supports read-ahead/prefetching to optimize small, sequential reads.
+This should normally be handled by the guest OS in the case of a VM,
+but boot loaders may not issue efficient reads.
+Read-ahead is automatically disabled if caching is disabled.
+
+
+``rbd readahead trigger requests``
+
+:Description: Number of sequential read requests necessary to trigger read-ahead.
+:Type: Integer
+:Required: No
+:Default: ``10``
+
+
+``rbd readahead max bytes``
+
+:Description: Maximum size of a read-ahead request. If zero, read-ahead is disabled.
+:Type: 64-bit Integer
+:Required: No
+:Default: ``512 KiB``
+
+
+``rbd readahead disable after bytes``
+
+:Description: After this many bytes have been read from an RBD image, read-ahead is disabled for that image until it is closed, on the assumption that the booted guest OS will manage its own read-ahead from then on. If set to zero, read-ahead is never disabled.
+:Type: 64-bit Integer
+:Required: No
+:Default: ``50 MiB``
common/hobject.cc \
common/bloom_filter.cc \
common/linux_version.c \
- common/module.c
+ common/module.c \
+ common/Readahead.cc
# these should go out of libcommon
libcommon_la_SOURCES += \
common/cmdparse.h \
common/hobject.h \
common/linux_version.h \
- common/module.h
+ common/module.h \
+ common/Readahead.h
noinst_LTLIBRARIES += libcommon.la
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "Readahead.h"
+
+using namespace std;
+
// Construct with defaults: readahead triggers after 10 sequential
// requests, has no minimum size, and no maximum (NO_LIMIT).  Consumers
// tune these afterwards via set_trigger_requests(),
// set_min_readahead_size(), set_max_readahead_size() and set_alignments().
Readahead::Readahead()
  : m_trigger_requests(10),
    m_readahead_min_bytes(0),
    m_readahead_max_bytes(NO_LIMIT),
    m_alignments(),
    m_lock("Readahead::m_lock"),
    m_nr_consec_read(0),
    m_consec_read_bytes(0),
    m_last_pos(0),
    m_readahead_pos(0),
    m_readahead_trigger_pos(0),
    m_readahead_size(0),
    m_pending(0),
    m_pending_lock("Readahead::m_pending_lock"),
    m_pending_cond() {
}

// No explicit cleanup needed; members release their own resources.
Readahead::~Readahead() {
}
+
+Readahead::extent_t Readahead::update(const vector<extent_t>& extents, uint64_t limit) {
+ m_lock.Lock();
+ for (vector<extent_t>::const_iterator p = extents.begin(); p != extents.end(); ++p) {
+ _observe_read(p->first, p->second);
+ }
+ pair<uint64_t, uint64_t> extent = _compute_readahead(limit);
+ m_lock.Unlock();
+ return extent;
+}
+
+Readahead::extent_t Readahead::update(uint64_t offset, uint64_t length, uint64_t limit) {
+ m_lock.Lock();
+ _observe_read(offset, length);
+ extent_t extent = _compute_readahead(limit);
+ m_lock.Unlock();
+ return extent;
+}
+
+void Readahead::_observe_read(uint64_t offset, uint64_t length) {
+ if (offset == m_last_pos) {
+ m_nr_consec_read++;
+ m_consec_read_bytes += length;
+ } else {
+ m_nr_consec_read = 0;
+ m_consec_read_bytes = 0;
+ m_readahead_trigger_pos = 0;
+ m_readahead_size = 0;
+ }
+ m_last_pos = offset + length;
+}
+
// Compute the next readahead extent, or (0, 0) if none should be issued.
// m_lock must be held by the caller.
//
// Readahead fires only once m_trigger_requests consecutive sequential
// reads have been observed, and then only when the read stream has caught
// up to m_readahead_trigger_pos.  Each time it fires the window doubles
// (clamped to [m_readahead_min_bytes, m_readahead_max_bytes]), so a long
// sequential stream prefetches progressively further ahead.
Readahead::extent_t Readahead::_compute_readahead(uint64_t limit) {
  uint64_t readahead_offset = 0;
  uint64_t readahead_length = 0;
  if (m_nr_consec_read >= m_trigger_requests) {
    // currently reading sequentially
    if (m_last_pos >= m_readahead_trigger_pos) {
      // need to read ahead
      if (m_readahead_size == 0) {
        // initial readahead trigger
        m_readahead_size = m_consec_read_bytes;
        m_readahead_pos = m_last_pos;
      } else {
        // continuing readahead trigger
        m_readahead_size *= 2;
      }
      // Clamp to the configured window.  Note this happens BEFORE the
      // alignment snapping below, so alignment may still shrink/stretch
      // the request by up to 50% beyond these bounds (see Readahead.h).
      m_readahead_size = MAX(m_readahead_size, m_readahead_min_bytes);
      m_readahead_size = MIN(m_readahead_size, m_readahead_max_bytes);
      readahead_offset = m_readahead_pos;
      readahead_length = m_readahead_size;

      // Snap to the first alignment possible
      uint64_t readahead_end = readahead_offset + readahead_length;
      for (vector<uint64_t>::iterator p = m_alignments.begin(); p != m_alignments.end(); ++p) {
        // Align the readahead, if possible.
        uint64_t alignment = *p;
        // Nearest alignment boundaries on either side of the current end.
        uint64_t align_prev = readahead_end / alignment * alignment;
        uint64_t align_next = align_prev + alignment;
        uint64_t dist_prev = readahead_end - align_prev;
        uint64_t dist_next = align_next - readahead_end;
        if (dist_prev < readahead_length / 2 && dist_prev < dist_next) {
          // we can snap to the previous alignment point by a less than 50% reduction in size
          assert(align_prev > readahead_offset);
          readahead_length = align_prev - readahead_offset;
          break;
        } else if(dist_next < readahead_length / 2) {
          // we can snap to the next alignment point by a less than 50% increase in size
          assert(align_next > readahead_offset);
          readahead_length = align_next - readahead_offset;
          break;
        }
        // Note that m_readahead_size should remain unadjusted.
      }

      // Never read past the end of the target (e.g. the image size).
      if (m_readahead_pos + readahead_length > limit) {
        readahead_length = limit - m_readahead_pos;
      }

      // Re-arm: the next readahead fires once the reader is halfway
      // through the extent we are about to return.
      m_readahead_trigger_pos = m_readahead_pos + readahead_length / 2;
      m_readahead_pos += readahead_length;
    }
  }
  return extent_t(readahead_offset, readahead_length);
}
+
// Record `count` newly issued readahead requests.  The pending counter is
// guarded by m_pending_lock, separate from m_lock.
void Readahead::inc_pending(int count) {
  assert(count > 0);
  m_pending_lock.Lock();
  m_pending += count;
  m_pending_lock.Unlock();
}
+
// Record completion of `count` readahead requests; signals
// wait_for_pending() when the counter drops to zero.
void Readahead::dec_pending(int count) {
  assert(count > 0);
  m_pending_lock.Lock();
  // Dropping below zero would indicate an inc_pending/dec_pending mismatch.
  assert(m_pending >= count);
  m_pending -= count;
  if (m_pending == 0) {
    m_pending_cond.Signal();
  }
  m_pending_lock.Unlock();
}
+
+void Readahead::wait_for_pending() {
+ m_pending_lock.Lock();
+ while (m_pending > 0) {
+ m_pending_cond.Wait(m_lock);
+ }
+ m_pending_lock.Unlock();
+}
+
+void Readahead::set_trigger_requests(int trigger_requests) {
+ m_lock.Lock();
+ m_trigger_requests = trigger_requests;
+ m_lock.Unlock();
+}
+
+void Readahead::set_min_readahead_size(uint64_t min_readahead_size) {
+ m_lock.Lock();
+ m_readahead_min_bytes = min_readahead_size;
+ m_lock.Unlock();
+}
+
+void Readahead::set_max_readahead_size(uint64_t max_readahead_size) {
+ m_lock.Lock();
+ m_readahead_max_bytes = max_readahead_size;
+ m_lock.Unlock();
+}
+
+void Readahead::set_alignments(const vector<uint64_t> &alignments) {
+ m_lock.Lock();
+ m_alignments = alignments;
+ m_lock.Unlock();
+}
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_READAHEAD_H
+#define CEPH_READAHEAD_H
+
#include <stdint.h>
#include <utility>
#include <vector>

#include "Mutex.h"
#include "Cond.h"
+
+/**
+ This class provides common state and logic for code that needs to perform readahead
+ on linear things such as RBD images or files.
+ Unless otherwise specified, all methods are thread-safe.
+
+ Minimum and maximum readahead sizes may be violated by up to 50\% if alignment is enabled.
+ Minimum readahead size may be violated if the end of the readahead target is reached.
+ */
+class Readahead {
+public:
+ typedef std::pair<uint64_t, uint64_t> extent_t;
+
+ // equal to UINT64_MAX
+ static const uint64_t NO_LIMIT = 18446744073709551615ULL;
+
+ Readahead();
+
+ ~Readahead();
+
+ /**
+ Update state with new reads and return readahead to be performed.
+ If the length of the returned extent is 0, no readahead should be performed.
+ The readahead extent is guaranteed not to pass \c limit.
+
+ Note that passing in NO_LIMIT as the limit and truncating the returned extent
+ is not the same as passing in the correct limit, because the internal state
+ will differ in the two cases.
+
+ @param extents read operations since last call to update
+ @param limit size of the thing readahead is being applied to
+ */
+ extent_t update(const vector<extent_t>& extents, uint64_t limit);
+
+ /**
+ Update state with a new read and return readahead to be performed.
+ If the length of the returned extent is 0, no readahead should be performed.
+ The readahead extent is guaranteed not to pass \c limit.
+
+ Note that passing in NO_LIMIT as the limit and truncating the returned extent
+ is not the same as passing in the correct limit, because the internal state
+ will differ in the two cases.
+
+ @param offset offset of the read operation
+ @param length length of the read operation
+ @param limit size of the thing readahead is being applied to
+ */
+ extent_t update(uint64_t offset, uint64_t length, uint64_t limit);
+
+ /**
+ Increment the pending counter.
+ */
+ void inc_pending(int count = 1);
+
+ /**
+ Decrement the pending counter.
+ The counter must not be decremented below 0.
+ */
+ void dec_pending(int count = 1);
+
+ /**
+ Waits until the pending count reaches 0.
+ */
+ void wait_for_pending();
+
+ /**
+ Sets the number of sequential requests necessary to trigger readahead.
+ */
+ void set_trigger_requests(int trigger_requests);
+
+ /**
+ Sets the minimum size of a readahead request, in bytes.
+ */
+ void set_min_readahead_size(uint64_t min_readahead_size);
+
+ /**
+ Sets the maximum size of a readahead request, in bytes.
+ */
+ void set_max_readahead_size(uint64_t max_readahead_size);
+
+ /**
+ Sets the alignment units.
+ If the end point of a readahead request can be aligned to an alignment unit
+ by increasing or decreasing the size of the request by 50\% or less, it will.
+ Alignments are tested in order, so larger numbers should almost always come first.
+ */
+ void set_alignments(const std::vector<uint64_t> &alignments);
+
+private:
+ /**
+ Records that a read request has been received.
+ m_lock must be held while calling.
+ */
+ void _observe_read(uint64_t offset, uint64_t length);
+
+ /**
+ Computes the next readahead request.
+ m_lock must be held while calling.
+ */
+ extent_t _compute_readahead(uint64_t limit);
+
+ /// Number of sequential requests necessary to trigger readahead
+ int m_trigger_requests;
+
+ /// Minimum size of a readahead request, in bytes
+ uint64_t m_readahead_min_bytes;
+
+ /// Maximum size of a readahead request, in bytes
+ uint64_t m_readahead_max_bytes;
+
+ /// Alignment units, in bytes
+ std::vector<uint64_t> m_alignments;
+
+ /// Held while reading/modifying any state except m_pending
+ Mutex m_lock;
+
+ /// Number of consecutive read requests in the current sequential stream
+ int m_nr_consec_read;
+
+ /// Number of bytes read in the current sequenial stream
+ uint64_t m_consec_read_bytes;
+
+ /// Position of the read stream
+ uint64_t m_last_pos;
+
+ /// Position of the readahead stream
+ uint64_t m_readahead_pos;
+
+ /// When readahead is already triggered and the read stream crosses this point, readahead is continued
+ uint64_t m_readahead_trigger_pos;
+
+ /// Size of the next readahead request (barring changes due to alignment, etc.)
+ uint64_t m_readahead_size;
+
+ /// Number of pending readahead requests, as determined by inc_pending() and dec_pending()
+ int m_pending;
+
+ /// Lock for m_pending
+ Mutex m_pending_lock;
+
+ /// Signalled when m_pending reaches 0
+ Cond m_pending_cond;
+};
+
+#endif
OPTION(rbd_localize_snap_reads, OPT_BOOL, false)
OPTION(rbd_balance_parent_reads, OPT_BOOL, false)
OPTION(rbd_localize_parent_reads, OPT_BOOL, true)
+OPTION(rbd_readahead_trigger_requests, OPT_INT, 10) // number of sequential requests necessary to trigger readahead
+OPTION(rbd_readahead_max_bytes, OPT_LONGLONG, 512 * 1024) // set to 0 to disable readahead
+OPTION(rbd_readahead_disable_after_bytes, OPT_LONGLONG, 50 * 1024 * 1024) // how many bytes are read in total before readahead is disabled
/*
* The following options change the behavior for librbd's image creation methods that
format_string(NULL),
id(image_id), parent(NULL),
stripe_unit(0), stripe_count(0),
- object_cacher(NULL), writeback_handler(NULL), object_set(NULL)
+ object_cacher(NULL), writeback_handler(NULL), object_set(NULL),
+ readahead(),
+ total_bytes_read(0)
{
md_ctx.dup(p);
data_ctx.dup(p);
} else {
header_oid = old_header_name(name);
}
+
+ md_config_t *conf = cct->_conf;
+ vector<uint64_t> alignments;
+ alignments.push_back(stripe_count << order); // object set (in file striping terminology)
+ alignments.push_back(stripe_unit * stripe_count); // stripe
+ alignments.push_back(stripe_unit); // stripe unit
+ readahead.set_trigger_requests(conf->rbd_readahead_trigger_requests);
+ readahead.set_max_readahead_size(conf->rbd_readahead_max_bytes);
+ readahead.set_alignments(alignments);
+
return 0;
}
plb.add_u64_counter(l_librbd_snap_rollback, "snap_rollback");
plb.add_u64_counter(l_librbd_notify, "notify");
plb.add_u64_counter(l_librbd_resize, "resize");
+ plb.add_u64_counter(l_librbd_readahead, "readahead");
+ plb.add_u64_counter(l_librbd_readahead_bytes, "readahead_bytes");
perfcounter = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perfcounter);
#include <vector>
#include "common/Mutex.h"
+#include "common/Readahead.h"
#include "common/RWLock.h"
#include "common/snap_types.h"
#include "include/buffer.h"
LibrbdWriteback *writeback_handler;
ObjectCacher::ObjectSet *object_set;
+ Readahead readahead;
+ uint64_t total_bytes_read;
+
/**
* Either image_name or image_id must be set.
* If id is not known, pass the empty std::string,
void close_image(ImageCtx *ictx)
{
ldout(ictx->cct, 20) << "close_image " << ictx << dendl;
+
+ ictx->readahead.wait_for_pending();
+
if (ictx->object_cacher)
ictx->shutdown_cache(); // implicitly flushes
else
return aio_read(ictx, image_extents, buf, bl, c);
}
  /**
   * Issue readahead into the object cacher for a batch of image reads.
   *
   * Accounts the batch against ictx->total_bytes_read; once that total
   * exceeds rbd_readahead_disable_after_bytes (if nonzero), readahead is
   * skipped for the rest of this ImageCtx's lifetime.  Otherwise the
   * Readahead state machine is updated and any extent it returns is
   * mapped to object extents and read into the cache, with
   * pending-request accounting so close_image() can wait for completion.
   */
  static void readahead(ImageCtx *ictx,
                        const vector<pair<uint64_t,uint64_t> >& image_extents,
                        const md_config_t *conf)
  {
    // Total bytes covered by this batch of reads.
    uint64_t total_bytes = 0;
    for (vector<pair<uint64_t,uint64_t> >::const_iterator p = image_extents.begin();
         p != image_extents.end();
         ++p) {
      total_bytes += p->second;
    }
    // md_lock serializes updates to total_bytes_read; snap_lock is taken
    // inside it to sample the image size.  NOTE(review): this nesting is
    // assumed to match the project's lock ordering -- verify.
    ictx->md_lock.get_write();
    // `abort` is a local flag (it shadows ::abort): readahead is disabled
    // once enough data has been read in total, so a booted guest OS can
    // take over its own readahead.
    bool abort = conf->rbd_readahead_disable_after_bytes != 0 &&
      ictx->total_bytes_read > conf->rbd_readahead_disable_after_bytes;
    ictx->total_bytes_read += total_bytes;
    ictx->snap_lock.get_read();
    uint64_t image_size = ictx->get_image_size(ictx->snap_id);
    ictx->snap_lock.put_read();
    ictx->md_lock.put_write();
    if (abort) {
      return;
    }
    // Ask the readahead state machine what to prefetch; the extent is
    // clipped so it never extends past the current image size.
    pair<uint64_t, uint64_t> readahead_extent = ictx->readahead.update(image_extents, image_size);
    uint64_t readahead_offset = readahead_extent.first;
    uint64_t readahead_length = readahead_extent.second;

    if (readahead_length > 0) {
      ldout(ictx->cct, 20) << "(readahead logical) " << readahead_offset << "~" << readahead_length << dendl;
      map<object_t,vector<ObjectExtent> > readahead_object_extents;
      // Map the logical image extent onto per-object extents.
      Striper::file_to_extents(ictx->cct, ictx->format_string, &ictx->layout,
                               readahead_offset, readahead_length, 0, readahead_object_extents);
      for (map<object_t,vector<ObjectExtent> >::iterator p = readahead_object_extents.begin(); p != readahead_object_extents.end(); ++p) {
        for (vector<ObjectExtent>::iterator q = p->second.begin(); q != p->second.end(); ++q) {
          ldout(ictx->cct, 20) << "(readahead) oid " << q->oid << " " << q->offset << "~" << q->length << dendl;

          // Completion callback: decrements the pending-readahead counter
          // so close_image() can wait for all readahead I/O to drain.
          struct C_RBD_Readahead : public Context {
            ImageCtx *ictx;
            object_t oid;
            uint64_t offset;
            uint64_t length;
            C_RBD_Readahead(ImageCtx *ictx, object_t oid, uint64_t offset, uint64_t length)
              : ictx(ictx), oid(oid), offset(offset), length(length) { }
            void finish(int r) {
              ldout(ictx->cct, 20) << "C_RBD_Readahead on " << oid << ": " << offset << "+" << length << dendl;
              ictx->readahead.dec_pending();
            }
          };

          Context *req_comp = new C_RBD_Readahead(ictx, q->oid, q->offset, q->length);
          ictx->readahead.inc_pending();
          // NULL bufferlist: we only want the data pulled into the cache,
          // not returned to anyone.
          ictx->aio_read_from_cache(q->oid, NULL,
                                    q->length, q->offset,
                                    req_comp);
        }
      }
      ictx->perfcounter->inc(l_librbd_readahead);
      ictx->perfcounter->inc(l_librbd_readahead_bytes, readahead_length);
    }
  }
+
int aio_read(ImageCtx *ictx, const vector<pair<uint64_t,uint64_t> >& image_extents,
char *buf, bufferlist *pbl, AioCompletion *c)
{
snap_t snap_id = ictx->snap_id;
ictx->snap_lock.put_read();
+ // readahead
+ const md_config_t *conf = ictx->cct->_conf;
+ if (ictx->object_cacher && conf->rbd_readahead_max_bytes > 0) {
+ readahead(ictx, image_extents, conf);
+ }
+
// map
map<object_t,vector<ObjectExtent> > object_extents;
l_librbd_notify,
l_librbd_resize,
+ l_librbd_readahead,
+ l_librbd_readahead_bytes,
+
l_librbd_last,
};
unittest_rbd_replay_CXXFLAGS = $(UNITTEST_CXXFLAGS)
check_PROGRAMS += unittest_rbd_replay
+unittest_readahead_SOURCES = test/common/Readahead.cc
+unittest_readahead_LDADD = $(UNITTEST_LDADD) $(CEPH_GLOBAL)
+unittest_readahead_CXXFLAGS = $(UNITTEST_CXXFLAGS) -O2
+check_PROGRAMS += unittest_readahead
+
check_SCRIPTS += test/pybind/test_ceph_argparse.py
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2014 Adam Crume <adamcrume@gmail.com>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "common/Readahead.h"
+#include "gtest/gtest.h"
+#include <stdint.h>
+#include <boost/foreach.hpp>
+#include <cstdarg>
+
+
// Assert that readahead extent `ra` has the expected length and -- only
// when a readahead is actually expected (nonzero length) -- the expected
// offset.  When no readahead is expected the offset is meaningless, so it
// is deliberately left unchecked.
#define ASSERT_RA(expected_offset, expected_length, ra) \
  do { \
    Readahead::extent_t e = ra; \
    ASSERT_EQ((uint64_t)expected_length, e.second); \
    if (expected_length) { \
      ASSERT_EQ((uint64_t)expected_offset, e.first); \
    } \
  } while(0)
+
// With a trigger of 2 sequential requests, the third contiguous read
// (at 1020) triggers readahead; once reads stop being contiguous the
// stream resets and no further readahead is produced.
TEST(Readahead, random_access) {
  Readahead r;
  r.set_trigger_requests(2);
  ASSERT_RA(0, 0, r.update(1000, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1010, 10, Readahead::NO_LIMIT));
  ASSERT_RA(1030, 20, r.update(1020, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1040, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1060, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1080, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1100, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1200, 10, Readahead::NO_LIMIT));
}
+
// The configured minimum (40) rounds the initial 20-byte window up to 40;
// the next trigger doubles it to 80.
TEST(Readahead, min_size_limit) {
  Readahead r;
  r.set_trigger_requests(2);
  r.set_min_readahead_size(40);
  ASSERT_RA(0, 0, r.update(1000, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1010, 10, Readahead::NO_LIMIT));
  ASSERT_RA(1030, 40, r.update(1020, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1030, 10, Readahead::NO_LIMIT));
  ASSERT_RA(1070, 80, r.update(1040, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1050, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1060, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1070, 10, Readahead::NO_LIMIT));
}
+
// The readahead window grows 20 -> 40 and is then clamped at the
// configured maximum of 50 for every subsequent trigger.
TEST(Readahead, max_size_limit) {
  Readahead r;
  r.set_trigger_requests(2);
  r.set_max_readahead_size(50);
  ASSERT_RA(0, 0, r.update(1000, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1010, 10, Readahead::NO_LIMIT));
  ASSERT_RA(1030, 20, r.update(1020, 10, Readahead::NO_LIMIT));
  ASSERT_RA(1050, 40, r.update(1030, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1040, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1050, 10, Readahead::NO_LIMIT));
  ASSERT_RA(1090, 50, r.update(1060, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1070, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1080, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1090, 10, Readahead::NO_LIMIT));
  ASSERT_RA(0, 0, r.update(1100, 10, Readahead::NO_LIMIT));
  ASSERT_RA(1140, 50, r.update(1110, 10, Readahead::NO_LIMIT));
}
+
// Readahead never extends past the supplied limit (1100): the extent at
// 1090 is truncated to 10 bytes and nothing is issued beyond the limit.
TEST(Readahead, limit) {
  Readahead r;
  r.set_trigger_requests(2);
  r.set_max_readahead_size(50);
  uint64_t limit = 1100;
  ASSERT_RA(0, 0, r.update(1000, 10, limit));
  ASSERT_RA(0, 0, r.update(1010, 10, limit));
  ASSERT_RA(1030, 20, r.update(1020, 10, limit));
  ASSERT_RA(1050, 40, r.update(1030, 10, limit));
  ASSERT_RA(0, 0, r.update(1040, 10, limit));
  ASSERT_RA(0, 0, r.update(1050, 10, limit));
  ASSERT_RA(1090, 10, r.update(1060, 10, limit));
  ASSERT_RA(0, 0, r.update(1070, 10, limit));
  ASSERT_RA(0, 0, r.update(1080, 10, limit));
  ASSERT_RA(0, 0, r.update(1090, 10, limit));
}
+
+TEST(Readahead, alignment) {
+ Readahead r;
+ r.set_trigger_requests(2);
+ vector<uint64_t> alignment;
+ alignment.push_back(100);
+ r.set_alignments(alignment);
+ ASSERT_RA(0, 0, r.update(1000, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1010, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(1030, 20, r.update(1020, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(1050, 50, r.update(1030, 10, Readahead::NO_LIMIT)); // internal readahead size 40
+ ASSERT_RA(0, 0, r.update(1040, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1050, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1060, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(1100, 100, r.update(1070, 10, Readahead::NO_LIMIT)); // internal readahead size 80
+ ASSERT_RA(0, 0, r.update(1080, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1090, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1100, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1110, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1120, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1130, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(1200, 200, r.update(1140, 10, Readahead::NO_LIMIT)); // internal readahead size 160
+ ASSERT_RA(0, 0, r.update(1150, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1160, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1170, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1180, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1190, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1200, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1210, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1220, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1230, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1240, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1250, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1260, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1270, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(0, 0, r.update(1280, 10, Readahead::NO_LIMIT));
+ ASSERT_RA(1400, 300, r.update(1290, 10, Readahead::NO_LIMIT)); // internal readahead size 320
+ ASSERT_RA(0, 0, r.update(1300, 10, Readahead::NO_LIMIT));
+}