}
}
-#if 0
-
- // wait for RD cap and/or a valid file size
- int issued;
- while (1) {
- issued = in->caps_issued();
-
- if (lazy) {
- // wait for lazy cap
- if ((issued & CEPH_CAP_FILE_LAZYIO) == 0) {
- dout(7) << " don't have lazy cap, waiting" << dendl;
- goto wait;
- }
- } else {
- // wait for RD cap?
- while (!in->caps_issued_mask(CEPH_CAP_FILE_RD)) {
- dout(7) << " don't have read cap, waiting" << dendl;
- goto wait;
- }
- }
-
- // async i/o?
- if ((issued & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_CACHE))) {
-
-      // FIXME: this logic needs to move into FileCache!
-
- // wait for valid file size
- if (!in->have_valid_size()) {
- dout(7) << " don't have (rd+rdcache)|lease|excl for valid file size, waiting" << dendl;
- goto wait;
- }
-
- dout(10) << "file size: " << in->size << dendl;
- if (offset > 0 && (__u64)offset >= in->size) {
- if (movepos) unlock_fh_pos(f);
- return 0;
- }
- if ((__u64)(offset + size) > in->size)
- size = in->size - offset;
-
- if (size == 0) {
- dout(10) << "read is size=0, returning 0" << dendl;
- if (movepos) unlock_fh_pos(f);
- return 0;
- }
- break;
- } else {
- // unbuffered, sync i/o. we will defer to osd.
- break;
- }
-
- wait:
- wait_on_list(in->waitfor_caps);
- }
-
- in->get_cap_ref(CEPH_CAP_FILE_RD);
-
- int rvalue = 0;
- Mutex flock("Client::_read flock");
- Cond cond;
- bool done = false;
- Context *onfinish = new C_SafeCond(&flock, &cond, &done, &rvalue);
-
- int r = 0;
- if (g_conf.client_oc) {
-
- if (in->caps_issued_mask(CEPH_CAP_FILE_CACHE)) {
- // we will populate the cache here
- if (in->cap_refs[CEPH_CAP_FILE_CACHE] == 0)
- in->get_cap_ref(CEPH_CAP_FILE_CACHE);
-
- // readahead?
- if (f->nr_consec_read &&
- (g_conf.client_readahead_max_bytes ||
- g_conf.client_readahead_max_periods)) {
- loff_t l = f->consec_read_bytes * 2;
- if (g_conf.client_readahead_min)
- l = MAX(l, g_conf.client_readahead_min);
- if (g_conf.client_readahead_max_bytes)
- l = MIN(l, g_conf.client_readahead_max_bytes);
- loff_t p = ceph_file_layout_period(in->layout);
- if (g_conf.client_readahead_max_periods)
- l = MIN(l, g_conf.client_readahead_max_periods * p);
- if (l >= 2*p)
- // align with period
- l -= (offset+l) % p;
- // don't read past end of file
- if (offset+l > (loff_t)in->size)
- l = in->size - offset;
-
- dout(10) << "readahead " << f->nr_consec_read << " reads "
- << f->consec_read_bytes << " bytes ... readahead " << offset << "~" << l
- << " (caller wants " << offset << "~" << size << ")" << dendl;
- objectcacher->file_read(in->ino, &in->layout, in->snapid,
- offset, l, NULL, 0, 0);
- dout(10) << "readahead initiated" << dendl;
- }
-
- // read (and possibly block)
- if (in->snapid == CEPH_NOSNAP)
- r = objectcacher->file_read(in->ino, &in->layout, in->snapid,
- offset, size, bl, 0, onfinish);
- else
- r = objectcacher->file_read(in->ino, &in->layout, in->snapid,
- offset, size, bl, 0, onfinish);
-
-
- if (r == 0) {
- while (!done)
- cond.Wait(client_lock);
- r = rvalue;
- } else {
- // it was cached.
- delete onfinish;
- }
- } else {
- r = objectcacher->file_atomic_sync_read(in->ino, &in->layout, in->snapid,
- offset, size, bl, 0, client_lock);
- }
-
- } else {
- // object cache OFF -- non-atomic sync read from osd
-
- // do sync read
- int flags = 0;
- if (in->hack_balance_reads || g_conf.client_hack_balance_reads)
- flags |= CEPH_OSD_FLAG_BALANCE_READS;
- filer->read(in->ino, &in->layout, in->snapid,
- offset, size, bl, flags, onfinish);
-
- while (!done)
- cond.Wait(client_lock);
- r = rvalue;
- }
-
- if (movepos) {
- // adjust fd pos
- f->pos = offset+bl->length();
- unlock_fh_pos(f);
- }
-
- // adjust readahead state
- if (f->last_pos != offset) {
- f->nr_consec_read = f->consec_read_bytes = 0;
- } else {
- f->nr_consec_read++;
- }
- f->consec_read_bytes += bl->length();
- dout(10) << "readahead nr_consec_read " << f->nr_consec_read
- << " for " << f->consec_read_bytes << " bytes"
- << " .. last_pos " << f->last_pos << " .. offset " << offset
- << dendl;
- f->last_pos = offset+bl->length();
-
- // done!
- put_cap_ref(in, CEPH_CAP_FILE_RD);
-
- return rvalue;
-}
-#endif
-
/*
* we keep count of uncommitted sync writes on the inode, so that