#include <linux/task_io_accounting_ops.h>
#include "ceph_debug.h"
-int ceph_debug_addr __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_ADDR
-#define DOUT_VAR ceph_debug_addr
#include "super.h"
#include "osd_client.h"
return !TestSetPageDirty(page);
if (TestSetPageDirty(page)) {
- dout(20, "%p set_page_dirty %p idx %lu -- already dirty\n",
+ dout("%p set_page_dirty %p idx %lu -- already dirty\n",
mapping->host, page, page->index);
return 0;
}
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
++ci->i_wrbuffer_ref;
- dout(20, "%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
+ dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
mapping->host, page, page->index,
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
page->private = (unsigned long)snapc;
SetPagePrivate(page);
} else {
- dout(20, "ANON set_page_dirty %p (raced truncate?)\n", page);
+ dout("ANON set_page_dirty %p (raced truncate?)\n", page);
undo = 1;
}
ci = ceph_inode(inode);
if (offset == 0) {
- dout(20, "%p invalidatepage %p idx %lu full dirty page %lu\n",
+ dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
inode, page, page->index, offset);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc);
page->private = 0;
ClearPagePrivate(page);
} else {
- dout(20, "%p invalidatepage %p idx %lu partial dirty page\n",
+ dout("%p invalidatepage %p idx %lu partial dirty page\n",
inode, page, page->index);
}
}
static int ceph_releasepage(struct page *page, gfp_t g)
{
struct inode *inode = page->mapping ? page->mapping->host : NULL;
- dout(20, "%p releasepage %p idx %lu\n", inode, page, page->index);
+ dout("%p releasepage %p idx %lu\n", inode, page, page->index);
WARN_ON(PageDirty(page));
WARN_ON(page->private);
WARN_ON(PagePrivate(page));
struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
int err = 0;
- dout(10, "readpage inode %p file %p page %p index %lu\n",
+ dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
page->index << PAGE_SHIFT, PAGE_SIZE,
next_index = list_entry(page_list->prev, struct page, lru)->index;
list_for_each_entry_reverse(page, page_list, lru) {
if (page->index == next_index) {
- dout(20, "readpages page %d %p\n", contig_pages, page);
+ dout("readpages page %d %p\n", contig_pages, page);
pages[contig_pages] = page;
contig_pages++;
next_index++;
struct pagevec pvec;
loff_t offset;
- dout(10, "readpages %p file %p nr_pages %d\n",
+ dout("readpages %p file %p nr_pages %d\n",
inode, file, nr_pages);
pages = page_vector_from_list(page_list, &nr_pages);
if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
page_cache_release(page);
- dout(20, "readpages %p add_to_page_cache failed %p\n",
+ dout("readpages %p add_to_page_cache failed %p\n",
inode, page);
continue;
}
- dout(10, "readpages %p adding %p idx %lu\n", inode, page,
+ dout("readpages %p adding %p idx %lu\n", inode, page,
page->index);
flush_dcache_page(page);
SetPageUptodate(page);
struct ceph_cap_snap *capsnap = NULL;
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- dout(20, " cap_snap %p snapc %p has %d dirty pages\n", capsnap,
+ dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
capsnap->context, capsnap->dirty_pages);
if (capsnap->dirty_pages) {
snapc = ceph_get_snap_context(capsnap->context);
}
if (!snapc && ci->i_snap_realm) {
snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
- dout(20, " head snapc %p has %d dirty pages\n",
+ dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
}
return snapc;
struct ceph_snap_context *snapc;
u64 snap_size = 0;
- dout(10, "writepage %p idx %lu\n", page, page->index);
+ dout("writepage %p idx %lu\n", page, page->index);
if (!page->mapping || !page->mapping->host) {
- dout(10, "writepage %p - no mapping\n", page);
+ dout("writepage %p - no mapping\n", page);
return -EFAULT;
}
inode = page->mapping->host;
/* verify this is a writeable snap context */
snapc = (void *)page->private;
if (snapc == NULL) {
- dout(20, "writepage %p page %p not dirty?\n", inode, page);
+ dout("writepage %p page %p not dirty?\n", inode, page);
goto out;
}
if (snapc != get_oldest_context(inode, &snap_size)) {
- dout(10, "writepage %p page %p snapc %p not writeable - noop\n",
+ dout("writepage %p page %p snapc %p not writeable - noop\n",
inode, page, (void *)page->private);
/* we should only noop if called by kswapd */
WARN_ON((current->flags & PF_MEMALLOC) == 0);
if (i_size < page_off + len)
len = i_size - page_off;
- dout(10, "writepage %p page %p index %lu on %llu~%u\n",
+ dout("writepage %p page %p index %lu on %llu~%u\n",
inode, page, page->index, page_off, len);
set_page_writeback(page);
&inode->i_mtime,
&page, 1, 0, 0);
if (err < 0) {
- dout(20, "writepage setting page error %p\n", page);
+ dout("writepage setting page error %p\n", page);
SetPageError(page);
if (wbc)
wbc->pages_skipped++;
} else {
- dout(20, "writepage cleaned page %p\n", page);
+ dout("writepage cleaned page %p\n", page);
err = 0; /* vfs expects us to return 0 */
}
page->private = 0;
wrote = 0;
mapping_set_error(mapping, rc);
}
- dout(10, "writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
+ dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
inode, rc, bytes, wrote);
/* clean all pages */
WARN_ON(!PageUptodate(page));
if (i >= wrote) {
- dout(20, "inode %p skipping page %p\n", inode, page);
+ dout("inode %p skipping page %p\n", inode, page);
wbc->pages_skipped++;
}
page->private = 0;
ClearPagePrivate(page);
ceph_put_snap_context(snapc);
- dout(50, "unlocking %d %p\n", i, page);
+ dout("unlocking %d %p\n", i, page);
end_page_writeback(page);
unlock_page(page);
}
- dout(20, "%p wrote+cleaned %d pages\n", inode, wrote);
+ dout("%p wrote+cleaned %d pages\n", inode, wrote);
ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);
ceph_release_pages(req->r_pages, req->r_num_pages);
do_sync = wbc->sync_mode == WB_SYNC_ALL;
if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
do_sync = 1;
- dout(10, "writepages_start %p dosync=%d (pdflush=%d mode=%s)\n",
+ dout("writepages_start %p dosync=%d (pdflush=%d mode=%s)\n",
inode, do_sync, current_is_pdflush(),
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
client = ceph_inode_to_client(inode);
if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
- dout(1, "writepage_start %p on forced umount\n", inode);
+ pr_warning("ceph writepage_start %p on forced umount\n", inode);
return -EIO; /* we're in a forced umount, don't write! */
}
if (client->mount_args.wsize && client->mount_args.wsize < wsize)
/* ?? */
if (wbc->nonblocking && bdi_write_congested(bdi)) {
- dout(20, " writepages congested\n");
+ dout(" writepages congested\n");
wbc->encountered_congestion = 1;
goto out_free;
}
if (wbc->range_cyclic) {
start = mapping->writeback_index; /* Start from prev offset */
end = -1;
- dout(20, " cyclic, start at %lu\n", start);
+ dout(" cyclic, start at %lu\n", start);
} else {
start = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
should_loop = 0;
- dout(20, " not cyclic, %lu to %lu\n", start, end);
+ dout(" not cyclic, %lu to %lu\n", start, end);
}
index = start;
if (!snapc) {
/* hmm, why does writepages get called when there
is no dirty data? */
- dout(20, " no snap context with dirty data?\n");
+ dout(" no snap context with dirty data?\n");
goto out;
}
- dout(20, " oldest snapc is %p seq %lld (%d snaps)\n",
+ dout(" oldest snapc is %p seq %lld (%d snaps)\n",
snapc, snapc->seq, snapc->num_snaps);
if (last_snapc && snapc != last_snapc) {
/* if we switched to a newer snapc, restart our scan at the
* start of the original file range. */
- dout(20, " snapc differs from last pass, restarting at %lu\n",
+ dout(" snapc differs from last pass, restarting at %lu\n",
index);
index = start;
}
pvec_pages = pagevec_lookup_tag(pvec, mapping, &index,
PAGECACHE_TAG_DIRTY,
want);
- dout(20, "pagevec_lookup_tag got %d\n", pvec_pages);
+ dout("pagevec_lookup_tag got %d\n", pvec_pages);
if (!pvec_pages && !locked_pages)
break;
for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
page = pvec->pages[i];
- dout(20, "? %p idx %lu\n", page, page->index);
+ dout("? %p idx %lu\n", page, page->index);
if (locked_pages == 0)
lock_page(page); /* first page */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
/* only dirty pages, or our accounting breaks */
if (unlikely(!PageDirty(page)) ||
unlikely(page->mapping != mapping)) {
- dout(20, "!dirty or !mapping %p\n", page);
+ dout("!dirty or !mapping %p\n", page);
unlock_page(page);
break;
}
if (!wbc->range_cyclic && page->index > end) {
- dout(20, "end of range %p\n", page);
+ dout("end of range %p\n", page);
done = 1;
unlock_page(page);
break;
}
if (next && (page->index != next)) {
- dout(20, "not consecutive %p\n", page);
+ dout("not consecutive %p\n", page);
unlock_page(page);
break;
}
if (wbc->sync_mode != WB_SYNC_NONE) {
- dout(20, "waiting on writeback %p\n", page);
+ dout("waiting on writeback %p\n", page);
wait_on_page_writeback(page);
}
if ((snap_size && page_offset(page) > snap_size) ||
(!snap_size &&
page_offset(page) > i_size_read(inode))) {
- dout(20, "%p page eof %llu\n", page, snap_size ?
+ dout("%p page eof %llu\n", page, snap_size ?
snap_size : i_size_read(inode));
done = 1;
unlock_page(page);
break;
}
if (PageWriteback(page)) {
- dout(20, "%p under writeback\n", page);
+ dout("%p under writeback\n", page);
unlock_page(page);
break;
}
/* only if matching snap context */
if (snapc != (void *)page->private) {
- dout(20, "page snapc %p != oldest %p\n",
+ dout("page snapc %p != oldest %p\n",
(void *)page->private, snapc);
unlock_page(page);
if (!locked_pages)
}
if (!clear_page_dirty_for_io(page)) {
- dout(20, "%p !clear_page_dirty_for_io\n", page);
+ dout("%p !clear_page_dirty_for_io\n", page);
unlock_page(page);
break;
}
/* note position of first page in pvec */
if (first < 0)
first = i;
- dout(20, "%p will write page %p idx %lu\n",
+ dout("%p will write page %p idx %lu\n",
inode, page, page->index);
set_page_writeback(page);
req->r_pages[locked_pages] = page;
if (pvec_pages && i == pvec_pages &&
locked_pages < max_pages) {
- dout(50, "reached end pvec, trying for more\n");
+ dout("reached end pvec, trying for more\n");
pagevec_reinit(pvec);
goto get_more_pages;
}
/* shift unused pages over in the pvec... we
* will need to release them below. */
for (j = i; j < pvec_pages; j++) {
- dout(50, " pvec leftover page %p\n",
+ dout(" pvec leftover page %p\n",
pvec->pages[j]);
pvec->pages[j-i+first] = pvec->pages[j];
}
offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
(u64)locked_pages << PAGE_CACHE_SHIFT);
- dout(10, "writepages got %d pages at %llu~%llu\n",
+ dout("writepages got %d pages at %llu~%llu\n",
locked_pages, offset, len);
/* revise final length, page count */
done = 1;
release_pvec_pages:
- dout(50, "pagevec_release on %d pages (%p)\n", (int)pvec->nr,
+ dout("pagevec_release on %d pages (%p)\n", (int)pvec->nr,
pvec->nr ? pvec->pages[0] : NULL);
pagevec_release(pvec);
if (should_loop && !done) {
/* more to do; loop back to beginning of file */
- dout(40, "writepages looping back to beginning of file\n");
+ dout("writepages looping back to beginning of file\n");
should_loop = 0;
index = 0;
goto retry;
if (rc > 0)
rc = 0; /* vfs expects us to return 0 */
ceph_put_snap_context(snapc);
- dout(10, "writepages done, rc = %d\n", rc);
+ dout("writepages done, rc = %d\n", rc);
out_free:
kfree(pvec);
return rc;
return -ENOMEM;
*pagep = page;
- dout(10, "write_begin file %p inode %p page %p %d~%d\n", file,
+ dout("write_begin file %p inode %p page %p %d~%d\n", file,
inode, page, (int)pos, (int)len);
retry_locked:
up_read(&mdsc->snap_rwsem);
if (snapc != (void *)page->private) {
- dout(10, " page %p snapc %p not current or oldest\n",
+ dout(" page %p snapc %p not current or oldest\n",
page, (void *)page->private);
/*
* queue for writeback, and wait for snapc to
}
/* yay, writeable, do it now (without dropping page lock) */
- dout(10, " page %p snapc %p not current, but oldest\n",
+ dout(" page %p snapc %p not current, but oldest\n",
page, snapc);
if (!clear_page_dirty_for_io(page))
goto retry_locked;
}
if (PageUptodate(page)) {
- dout(20, " page %p already uptodate\n", page);
+ dout(" page %p already uptodate\n", page);
return 0;
}
if (page_off >= i_size ||
(pos_in_page == 0 && (pos+len) >= i_size &&
end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
- dout(20, " zeroing %p 0 - %d and %d - %d\n",
+ dout(" zeroing %p 0 - %d and %d - %d\n",
page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
zero_user_segments(page,
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
int check_cap = 0;
- dout(10, "write_end file %p inode %p page %p %d~%d (%d)\n", file,
+ dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
inode, page, (int)pos, (int)copied, (int)len);
/* zero the stale part of the page if we did a short copy */
else
len = size & ~PAGE_CACHE_MASK;
- dout(10, "page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
+ dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
off, len, page, page->index);
ret = ceph_write_begin(vma->vm_file, inode->i_mapping, off, len, 0,
&locked_page, &fsdata);
ceph_write_end(vma->vm_file, inode->i_mapping, off, len, len,
locked_page, fsdata);
#endif
- dout(10, "page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
+ dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
return ret;
}
#include <linux/wait.h>
#include "ceph_debug.h"
-
-int ceph_debug_caps __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_CAPS
-#define DOUT_VAR ceph_debug_caps
#include "super.h"
-
#include "decode.h"
#include "messenger.h"
LIST_HEAD(newcaps);
int ret = 0;
- dout(30, "reserve caps ctx=%p need=%d\n", ctx, need);
+ dout("reserve caps ctx=%p need=%d\n", ctx, need);
/* first reserve any caps that are already allocated */
spin_lock(&caps_list_lock);
spin_unlock(&caps_list_lock);
ctx->count = need;
- dout(30, "reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
+ dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
ctx, caps_total_count, caps_use_count, caps_reserve_count,
caps_avail_count);
return 0;
int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
{
- dout(30, "unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
+ dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
if (ctx->count) {
spin_lock(&caps_list_lock);
BUG_ON(caps_reserve_count < ctx->count);
caps_reserve_count -= ctx->count;
caps_avail_count += ctx->count;
ctx->count = 0;
- dout(30, "unreserve caps %d = %d used + %d resv + %d avail\n",
+ dout("unreserve caps %d = %d used + %d resv + %d avail\n",
caps_total_count, caps_use_count, caps_reserve_count,
caps_avail_count);
BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
spin_lock(&caps_list_lock);
- dout(30, "get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
+ dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
ctx, ctx->count, caps_total_count, caps_use_count,
caps_reserve_count, caps_avail_count);
BUG_ON(!ctx->count);
struct ceph_cap_reservation *ctx)
{
spin_lock(&caps_list_lock);
- dout(30, "put_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
+ dout("put_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
ctx, ctx ? ctx->count : 0, caps_total_count, caps_use_count,
caps_reserve_count, caps_avail_count);
caps_use_count--;
ma->caps_wanted_delay_min * HZ);
ci->i_hold_caps_max = round_jiffies(jiffies +
ma->caps_wanted_delay_max * HZ);
- dout(10, "__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
+ dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
/*
struct ceph_inode_info *ci)
{
__cap_set_timeouts(mdsc, ci);
- dout(10, "__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
+ dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
ci->i_ceph_flags, ci->i_hold_caps_max);
if (!mdsc->stopping) {
spin_lock(&mdsc->cap_delay_lock);
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout(10, "__cap_delay_requeue_front %p\n", &ci->vfs_inode);
+ dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
spin_lock(&mdsc->cap_delay_lock);
ci->i_ceph_flags |= CEPH_I_FLUSH;
if (!list_empty(&ci->i_cap_delay_list))
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout(10, "__cap_delay_cancel %p\n", &ci->vfs_inode);
+ dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
if (list_empty(&ci->i_cap_delay_list))
return;
spin_lock(&mdsc->cap_delay_lock);
int mds = session->s_mds;
int actual_wanted;
- dout(10, "add_cap %p mds%d cap %llx %s seq %d\n", inode,
+ dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
session->s_mds, cap_id, ceph_cap_string(issued), seq);
/*
if (S_ISDIR(inode->i_mode) &&
(issued & CEPH_CAP_FILE_SHARED) &&
(cap->issued & CEPH_CAP_FILE_SHARED) == 0) {
- dout(10, " marking %p NOT complete\n", inode);
+ dout(" marking %p NOT complete\n", inode);
ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
}
actual_wanted = __ceph_caps_wanted(ci);
if ((wanted & ~actual_wanted) ||
(issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
- dout(10, " issued %s, mds wanted %s, actual %s, queueing\n",
+ dout(" issued %s, mds wanted %s, actual %s, queueing\n",
ceph_cap_string(issued), ceph_cap_string(wanted),
ceph_cap_string(actual_wanted));
__cap_delay_requeue(mdsc, ci);
else if (ci->i_auth_cap == cap)
ci->i_auth_cap = NULL;
- dout(10, "add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
+ dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
ceph_cap_string(issued|cap->issued), seq, mds);
cap->cap_id = cap_id;
spin_unlock(&cap->session->s_cap_lock);
if (cap->gen < gen || time_after_eq(jiffies, ttl)) {
- dout(30, "__cap_is_valid %p cap %p issued %s "
+ dout("__cap_is_valid %p cap %p issued %s "
"but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
cap, ceph_cap_string(cap->issued), cap->gen, gen);
return 0;
cap = rb_entry(p, struct ceph_cap, ci_node);
if (!__cap_is_valid(cap))
continue;
- dout(30, "__ceph_caps_issued %p cap %p issued %s\n",
+ dout("__ceph_caps_issued %p cap %p issued %s\n",
&ci->vfs_inode, cap, ceph_cap_string(cap->issued));
have |= cap->issued;
if (implemented)
{
struct ceph_mds_session *s = cap->session;
- dout(20, "__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
+ dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
s->s_mds);
spin_lock(&s->s_cap_lock);
list_move_tail(&cap->session_caps, &s->s_caps);
int have = ci->i_snap_caps;
if ((have & mask) == mask) {
- dout(30, "__ceph_caps_issued_mask %p snap issued %s"
+ dout("__ceph_caps_issued_mask %p snap issued %s"
" (mask %s)\n", &ci->vfs_inode,
ceph_cap_string(have),
ceph_cap_string(mask));
if (!__cap_is_valid(cap))
continue;
if ((cap->issued & mask) == mask) {
- dout(30, "__ceph_caps_issued_mask %p cap %p issued %s"
+ dout("__ceph_caps_issued_mask %p cap %p issued %s"
" (mask %s)\n", &ci->vfs_inode, cap,
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
/* does a combination of caps satisfy mask? */
have |= cap->issued;
if ((have & mask) == mask) {
- dout(30, "__ceph_caps_issued_mask %p combo issued %s"
+ dout("__ceph_caps_issued_mask %p combo issued %s"
" (mask %s)\n", &ci->vfs_inode,
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
}
}
spin_unlock(&inode->i_lock);
- dout(30, "ceph_caps_revoking %p %s = %d\n", inode,
+ dout("ceph_caps_revoking %p %s = %d\n", inode,
ceph_cap_string(mask), ret);
return ret;
}
struct ceph_inode_info *ci = cap->ci;
struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
- dout(20, "__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+ dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
/* remove from session list */
spin_lock(&session->s_cap_lock);
struct ceph_mds_caps *fc;
struct ceph_msg *msg;
- dout(10, "send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
+ dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
" seq %u/%u mseq %u follows %lld size %llu/%llu"
" xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
msg = list_first_entry(&session->s_cap_releases,
struct ceph_msg, list_head);
- dout(10, " adding %p release to mds%d msg %p (%d left)\n",
+ dout(" adding %p release to mds%d msg %p (%d left)\n",
inode, session->s_mds, msg, session->s_num_cap_releases);
BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
msg->front.iov_len += sizeof(*item);
if (le32_to_cpu(head->num) == CAPS_PER_RELEASE) {
- dout(10, " release msg %p full\n", msg);
+ dout(" release msg %p full\n", msg);
list_move_tail(&msg->list_head,
&session->s_cap_releases_done);
} else {
- dout(10, " release msg %p at %d/%d (%d)\n", msg,
+ dout(" release msg %p at %d/%d (%d)\n", msg,
(int)le32_to_cpu(head->num), (int)CAPS_PER_RELEASE,
(int)msg->front.iov_len);
}
int delayed = 0;
u64 flush_tid = 0;
- dout(10, "__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
+ dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
inode, cap, cap->session,
ceph_cap_string(held), ceph_cap_string(held & retain),
ceph_cap_string(revoking));
/* don't release wanted unless we've waited a bit. */
if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
time_before(jiffies, ci->i_hold_caps_min)) {
- dout(20, " delaying issued %s -> %s, wanted %s -> %s on send\n",
+ dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
ceph_cap_string(cap->issued),
ceph_cap_string(cap->issued & retain),
ceph_cap_string(cap->mds_wanted),
if (flushing) {
flush_tid = ++cap->session->s_cap_flush_tid;
ci->i_cap_flush_tid = flush_tid;
- dout(10, " cap_flush_tid %lld\n", flush_tid);
+ dout(" cap_flush_tid %lld\n", flush_tid);
}
keep = cap->implemented;
if (dropping & CEPH_CAP_FILE_CACHE) {
/* invalidate what we can */
- dout(20, "invalidating pages on %p\n", inode);
+ dout("invalidating pages on %p\n", inode);
invalidate_mapping_pages(&inode->i_data, 0, -1);
}
if (psession)
session = *psession;
- dout(10, "__flush_snaps %p\n", inode);
+ dout("__flush_snaps %p\n", inode);
retry:
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
/* avoid an infinite loop after retry */
/* pick mds, take s_mutex */
mds = __ceph_get_cap_mds(ci, &mseq);
if (session && session->s_mds != mds) {
- dout(30, "oops, wrong session %p mutex\n", session);
+ dout("oops, wrong session %p mutex\n", session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
session = NULL;
session = __ceph_lookup_mds_session(mdsc, mds);
mutex_unlock(&mdsc->mutex);
if (session) {
- dout(10, "inverting session/ino locks on %p\n",
+ dout("inverting session/ino locks on %p\n",
session);
mutex_lock(&session->s_mutex);
}
atomic_inc(&capsnap->nref);
spin_unlock(&inode->i_lock);
- dout(10, "flush_snaps %p cap_snap %p follows %lld size %llu\n",
+ dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
inode, capsnap, next_follows, capsnap->size);
send_cap_msg(mdsc, ceph_vino(inode).ino, 0,
CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
mdsc->num_cap_flushing++;
ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
- dout(20, " inode %p now flushing seq %lld\n", &ci->vfs_inode,
+ dout(" inode %p now flushing seq %lld\n", &ci->vfs_inode,
ci->i_cap_flush_seq);
}
spin_unlock(&mdsc->cap_dirty_lock);
}
}
- dout(10, "check_caps %p file_want %s used %s dirty %s flushing %s"
+ dout("check_caps %p file_want %s used %s dirty %s flushing %s"
" issued %s retain %s %s%s%s\n", inode,
ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
u32 invalidating_gen = ci->i_rdcache_gen;
int ret;
- dout(10, "check_caps trying to invalidate on %p\n", inode);
+ dout("check_caps trying to invalidate on %p\n", inode);
spin_unlock(&inode->i_lock);
ret = invalidate_inode_pages2(&inode->i_data);
spin_lock(&inode->i_lock);
ci->i_rdcache_gen = 0;
ci->i_rdcache_revoking = 0;
} else {
- dout(10, "check_caps failed to invalidate pages\n");
+ dout("check_caps failed to invalidate pages\n");
/* we failed to invalidate pages. check these
caps again later. */
force_requeue = 1;
revoking = cap->implemented & ~cap->issued;
if (revoking)
- dout(10, "mds%d revoking %s\n", cap->mds,
+ dout("mds%d revoking %s\n", cap->mds,
ceph_cap_string(revoking));
if (cap == ci->i_auth_cap &&
/* request larger max_size from MDS? */
if (ci->i_wanted_max_size > ci->i_max_size &&
ci->i_wanted_max_size > ci->i_requested_max_size) {
- dout(10, "requesting new max_size\n");
+ dout("requesting new max_size\n");
goto ack;
}
/* approaching file_max? */
if ((inode->i_size << 1) >= ci->i_max_size &&
(ci->i_reported_size << 1) < ci->i_max_size) {
- dout(10, "i_size approaching max_size\n");
+ dout("i_size approaching max_size\n");
goto ack;
}
}
/* flush anything dirty? */
if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
ci->i_dirty_caps) {
- dout(10, "flushing dirty caps\n");
+ dout("flushing dirty caps\n");
goto ack;
}
/* completed revocation? going down and there are no caps? */
if (revoking && (revoking & used) == 0) {
- dout(10, "completed revocation of %s\n",
+ dout("completed revocation of %s\n",
ceph_cap_string(cap->implemented & ~cap->issued));
goto ack;
}
/* delay? */
if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
time_before(jiffies, ci->i_hold_caps_max)) {
- dout(30, " delaying issued %s -> %s, wanted %s -> %s\n",
+ dout(" delaying issued %s -> %s, wanted %s -> %s\n",
ceph_cap_string(cap->issued),
ceph_cap_string(cap->issued & retain),
ceph_cap_string(cap->mds_wanted),
ack:
if (session && session != cap->session) {
- dout(30, "oops, wrong session %p mutex\n", session);
+ dout("oops, wrong session %p mutex\n", session);
mutex_unlock(&session->s_mutex);
session = NULL;
}
if (!session) {
session = cap->session;
if (mutex_trylock(&session->s_mutex) == 0) {
- dout(10, "inverting session/ino locks on %p\n",
+ dout("inverting session/ino locks on %p\n",
session);
spin_unlock(&inode->i_lock);
if (took_snap_rwsem) {
/* take snap_rwsem after session mutex */
if (!took_snap_rwsem) {
if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
- dout(10, "inverting snap/in locks on %p\n",
+ dout("inverting snap/in locks on %p\n",
inode);
spin_unlock(&inode->i_lock);
down_read(&mdsc->snap_rwsem);
if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
/* update dirty, flushing bits */
flushing = ci->i_dirty_caps;
- dout(10, " flushing %s, flushing_caps %s -> %s\n",
+ dout(" flushing %s, flushing_caps %s -> %s\n",
ceph_cap_string(flushing),
ceph_cap_string(ci->i_flushing_caps),
ceph_cap_string(ci->i_flushing_caps | flushing));
int was = __ceph_caps_dirty(ci);
int dirty = 0;
- dout(20, "__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
+ dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
ceph_cap_string(mask), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_dirty_caps | mask));
ci->i_dirty_caps |= mask;
if (!was) {
- dout(20, " inode %p now dirty\n", &ci->vfs_inode);
+ dout(" inode %p now dirty\n", &ci->vfs_inode);
spin_lock(&mdsc->cap_dirty_lock);
list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
spin_unlock(&mdsc->cap_dirty_lock);
__mark_caps_flushing(inode, session);
flushing = ci->i_dirty_caps;
- dout(10, " flushing %s, flushing_caps %s -> %s\n",
+ dout(" flushing %s, flushing_caps %s -> %s\n",
ceph_cap_string(flushing),
ceph_cap_string(ci->i_flushing_caps),
ceph_cap_string(ci->i_flushing_caps | flushing));
int err = 0;
int dirty;
- dout(10, "write_inode %p wait=%d\n", inode, wait);
+ dout("write_inode %p wait=%d\n", inode, wait);
if (wait) {
dirty = try_flush_caps(inode, NULL);
if (dirty)
{
struct ceph_inode_info *ci;
- dout(10, "kick_flushing_caps mds%d\n", session->s_mds);
+ dout("kick_flushing_caps mds%d\n", session->s_mds);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
spin_lock(&inode->i_lock);
cap = ci->i_auth_cap;
if (cap && cap->session == session) {
- dout(20, "kick_flushing_caps %p cap %p %s\n", inode,
+ dout("kick_flushing_caps %p cap %p %s\n", inode,
cap, ceph_cap_string(ci->i_flushing_caps));
__send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
__ceph_caps_used(ci),
if (ci->i_wrbuffer_ref == 0)
igrab(&ci->vfs_inode);
ci->i_wrbuffer_ref++;
- dout(30, "__take_cap_refs %p wrbuffer %d -> %d (?)\n",
+ dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
&ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
}
}
int ret = 0;
int have, implemented;
- dout(30, "get_cap_refs %p need %s want %s\n", inode,
+ dout("get_cap_refs %p need %s want %s\n", inode,
ceph_cap_string(need), ceph_cap_string(want));
spin_lock(&inode->i_lock);
if (need & CEPH_CAP_FILE_WR) {
if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
- dout(20, "get_cap_refs %p endoff %llu > maxsize %llu\n",
+ dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
inode, endoff, ci->i_max_size);
if (endoff > ci->i_wanted_max_size) {
*check_max = 1;
* can get a final snapshot value for size+mtime.
*/
if (__ceph_have_pending_cap_snap(ci)) {
- dout(20, "get_cap_refs %p cap_snap_pending\n", inode);
+ dout("get_cap_refs %p cap_snap_pending\n", inode);
goto out;
}
}
*/
int not = want & ~(have & need);
int revoking = implemented & ~have;
- dout(30, "get_cap_refs %p have %s but not %s (revoking %s)\n",
+ dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
inode, ceph_cap_string(have), ceph_cap_string(not),
ceph_cap_string(revoking));
if ((revoking & not) == 0) {
ret = 1;
}
} else {
- dout(30, "get_cap_refs %p have %s needed %s\n", inode,
+ dout("get_cap_refs %p have %s needed %s\n", inode,
ceph_cap_string(have), ceph_cap_string(need));
}
out:
spin_unlock(&inode->i_lock);
- dout(30, "get_cap_refs %p ret %d got %s\n", inode,
+ dout("get_cap_refs %p ret %d got %s\n", inode,
ret, ceph_cap_string(*got));
return ret;
}
if ((endoff >= ci->i_max_size ||
endoff > (inode->i_size << 1)) &&
endoff > ci->i_wanted_max_size) {
- dout(10, "write %p at large endoff %llu, req max_size\n",
+ dout("write %p at large endoff %llu, req max_size\n",
inode, endoff);
ci->i_wanted_max_size = endoff;
check = 1;
if (had & CEPH_CAP_FILE_BUFFER) {
if (--ci->i_wrbuffer_ref == 0)
last++;
- dout(30, "put_cap_refs %p wrbuffer %d -> %d (?)\n",
+ dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
}
if (had & CEPH_CAP_FILE_WR)
}
spin_unlock(&inode->i_lock);
- dout(30, "put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
+ dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
last ? "last" : "");
if (last && !flushsnaps)
ceph_put_snap_context(ci->i_head_snapc);
ci->i_head_snapc = NULL;
}
- dout(30, "put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
+ dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
inode,
ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
}
}
BUG_ON(!found);
- dout(30, "put_wrbuffer_cap_refs on %p cap_snap %p "
+ dout("put_wrbuffer_cap_refs on %p cap_snap %p "
" snap %lld %d/%d -> %d/%d %s%s\n",
inode, capsnap, capsnap->context->seq,
ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
int tried_invalidate = 0;
int ret;
- dout(10, "handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
+ dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
inode, cap, mds, seq, ceph_cap_string(newcaps));
- dout(10, " size %llu max_size %llu, i_size %llu\n", size, max_size,
+ dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
inode->i_size);
start:
ci->i_rdcache_gen++;
if (S_ISDIR(inode->i_mode)) {
- dout(10, " marking %p NOT complete\n", inode);
+ dout(" marking %p NOT complete\n", inode);
ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
}
}
*/
if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
!ci->i_wrbuffer_ref && !tried_invalidate) {
- dout(10, "CACHE invalidation\n");
+ dout("CACHE invalidation\n");
spin_unlock(&inode->i_lock);
tried_invalidate = 1;
inode->i_mode = le32_to_cpu(grant->mode);
inode->i_uid = le32_to_cpu(grant->uid);
inode->i_gid = le32_to_cpu(grant->gid);
- dout(20, "%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
+ dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
inode->i_uid, inode->i_gid);
}
if (!(len > 4 && *xattr_data == NULL) && /* ENOMEM in caller */
version > ci->i_xattrs.version) {
- dout(20, " got new xattrs v%llu on %p len %d\n",
+ dout(" got new xattrs v%llu on %p len %d\n",
version, inode, len);
kfree(ci->i_xattrs.data);
ci->i_xattrs.len = len;
/* max size increase? */
if (max_size != ci->i_max_size) {
- dout(10, "max_size %lld -> %llu\n", ci->i_max_size, max_size);
+ dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
ci->i_max_size = max_size;
if (max_size >= ci->i_wanted_max_size) {
ci->i_wanted_max_size = 0; /* reset */
wanted = __ceph_caps_wanted(ci);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
- dout(10, " my wanted = %s, used = %s, dirty %s\n",
+ dout(" my wanted = %s, used = %s, dirty %s\n",
ceph_cap_string(wanted),
ceph_cap_string(used),
ceph_cap_string(dirty));
if (wanted != le32_to_cpu(grant->wanted)) {
- dout(10, "mds wanted %s -> %s\n",
+ dout("mds wanted %s -> %s\n",
ceph_cap_string(le32_to_cpu(grant->wanted)),
ceph_cap_string(wanted));
grant->wanted = cpu_to_le32(wanted);
/* revocation, grant, or no-op? */
if (cap->issued & ~newcaps) {
- dout(10, "revocation: %s -> %s\n", ceph_cap_string(cap->issued),
+ dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
ceph_cap_string(newcaps));
if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER) {
writeback = 1; /* will delay ack */
}
cap->issued = newcaps;
} else if (cap->issued == newcaps) {
- dout(10, "caps unchanged: %s -> %s\n",
+ dout("caps unchanged: %s -> %s\n",
ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
} else {
- dout(10, "grant: %s -> %s\n", ceph_cap_string(cap->issued),
+ dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
ceph_cap_string(newcaps));
cap->issued = newcaps;
cap->implemented |= newcaps; /* add bits only, to
* filemap_write_and_wait, etc. from message handler
* context.
*/
- dout(10, "queueing %p for writeback\n", inode);
+ dout("queueing %p for writeback\n", inode);
if (ceph_queue_writeback(inode))
igrab(inode);
}
if (invalidate_async) {
- dout(10, "queueing %p for page invalidation\n", inode);
+ dout("queueing %p for page invalidation\n", inode);
if (ceph_queue_page_invalidation(inode))
igrab(inode);
}
u64 flush_tid = le64_to_cpu(m->client_tid);
int old_dirty = 0, new_dirty = 0;
- dout(10, "handle_cap_flush_ack inode %p mds%d seq %d cleaned %s,"
+ dout("handle_cap_flush_ack inode %p mds%d seq %d cleaned %s,"
" flushing %s -> %s\n",
inode, session->s_mds, seq, ceph_cap_string(cleaned),
ceph_cap_string(ci->i_flushing_caps),
ceph_cap_string(ci->i_flushing_caps & ~cleaned));
if (flush_tid != ci->i_cap_flush_tid) {
- dout(10, " flush_tid %lld != my flush_tid %lld, ignoring\n",
+ dout(" flush_tid %lld != my flush_tid %lld, ignoring\n",
flush_tid, ci->i_cap_flush_tid);
} else {
old_dirty = ci->i_dirty_caps | ci->i_flushing_caps;
spin_lock(&mdsc->cap_dirty_lock);
list_del_init(&ci->i_flushing_item);
if (!list_empty(&session->s_cap_flushing))
- dout(20, " mds%d still flushing cap on %p\n",
+ dout(" mds%d still flushing cap on %p\n",
session->s_mds,
&list_entry(session->s_cap_flushing.next,
struct ceph_inode_info,
i_flushing_item)->vfs_inode);
mdsc->num_cap_flushing--;
wake_up(&mdsc->cap_flushing_wq);
- dout(20, " inode %p now !flushing\n", inode);
+ dout(" inode %p now !flushing\n", inode);
if (!new_dirty) {
- dout(20, " inode %p now clean\n", inode);
+ dout(" inode %p now clean\n", inode);
list_del_init(&ci->i_dirty_item);
}
spin_unlock(&mdsc->cap_dirty_lock);
struct ceph_cap_snap *capsnap;
int drop = 0;
- dout(10, "handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
+ dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
inode, ci, session->s_mds, follows);
spin_lock(&inode->i_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->follows == follows) {
if (capsnap->flush_tid != flush_tid) {
- dout(10, " cap_snap %p follows %lld tid %lld !="
+ dout(" cap_snap %p follows %lld tid %lld !="
" %lld\n", capsnap, follows,
flush_tid, capsnap->flush_tid);
break;
}
WARN_ON(capsnap->dirty_pages || capsnap->writing);
- dout(10, " removing cap_snap %p follows %lld\n",
+ dout(" removing cap_snap %p follows %lld\n",
capsnap, follows);
ceph_put_snap_context(capsnap->context);
list_del(&capsnap->ci_item);
drop = 1;
break;
} else {
- dout(10, " skipping cap_snap %p follows %lld\n",
+ dout(" skipping cap_snap %p follows %lld\n",
capsnap, capsnap->follows);
}
}
issued |= implemented | dirty;
- dout(10, "handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
+ dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
inode, mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
struct rb_node *p;
int remember = 1;
- dout(10, "handle_cap_export inode %p ci %p mds%d mseq %d\n",
+ dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
inode, ci, mds, mseq);
spin_lock(&inode->i_lock);
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
t = rb_entry(p, struct ceph_cap, ci_node);
if (ceph_seq_cmp(t->mseq, mseq) > 0) {
- dout(10, " higher mseq on cap from mds%d\n",
+ dout(" higher mseq on cap from mds%d\n",
t->session->s_mds);
remember = 0;
}
if (ci->i_cap_exporting_mds >= 0 &&
ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
- dout(10, "handle_cap_import inode %p ci %p mds%d mseq %d"
+ dout("handle_cap_import inode %p ci %p mds%d mseq %d"
" - cleared exporting from mds%d\n",
inode, ci, mds, mseq,
ci->i_cap_exporting_mds);
ci->i_cap_exporting_mseq = 0;
ci->i_cap_exporting_mds = -1;
} else {
- dout(10, "handle_cap_import inode %p ci %p mds%d mseq %d\n",
+ dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
inode, ci, mds, mseq);
}
void *xattr_data = NULL;
int r;
- dout(10, "handle_caps from mds%d\n", mds);
+ dout("handle_caps from mds%d\n", mds);
/* decode */
if (msg->front.iov_len < sizeof(*h))
session = __ceph_lookup_mds_session(mdsc, mds);
mutex_unlock(&mdsc->mutex);
if (!session) {
- dout(10, "WTF, got cap but no session for mds%d\n", mds);
+ dout("WTF, got cap but no session for mds%d\n", mds);
return;
}
mutex_lock(&session->s_mutex);
session->s_seq++;
- dout(20, " mds%d seq %lld\n", session->s_mds, session->s_seq);
+ dout(" mds%d seq %lld\n", session->s_mds, session->s_seq);
/* lookup ino */
inode = ceph_find_inode(sb, vino);
- dout(20, " op %s ino %llx inode %p\n", ceph_cap_op_name(op), vino.ino,
+ dout(" op %s ino %llx inode %p\n", ceph_cap_op_name(op), vino.ino,
inode);
if (!inode) {
- dout(10, " i don't have ino %llx\n", vino.ino);
+ dout(" i don't have ino %llx\n", vino.ino);
goto done;
}
spin_lock(&inode->i_lock);
cap = __get_cap_for_mds(ceph_inode(inode), mds);
if (!cap) {
- dout(10, "no cap on %p ino %llx.%llx from mds%d, releasing\n",
+ dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
inode, ceph_ino(inode), ceph_snap(inode), mds);
spin_unlock(&inode->i_lock);
goto done;
case CEPH_CAP_OP_GRANT:
r = handle_cap_grant(inode, h, session, cap, &xattr_data);
if (r == 1) {
- dout(10, " sending reply back to mds%d\n", mds);
+ dout(" sending reply back to mds%d\n", mds);
ceph_msg_get(msg);
ceph_send_msg_mds(mdsc, msg, mds);
} else if (r == 2) {
if (flushdirty)
flags |= CHECK_CAPS_FLUSH;
- dout(10, "check_delayed_caps\n");
+ dout("check_delayed_caps\n");
while (1) {
spin_lock(&mdsc->cap_delay_lock);
if (list_empty(&mdsc->cap_delay_list))
break;
list_del_init(&ci->i_cap_delay_list);
spin_unlock(&mdsc->cap_delay_lock);
- dout(10, "check_delayed_caps on %p\n", &ci->vfs_inode);
+ dout("check_delayed_caps on %p\n", &ci->vfs_inode);
ceph_check_caps(ci, flags, NULL);
}
spin_unlock(&mdsc->cap_delay_lock);
int last = 0;
spin_lock(&inode->i_lock);
- dout(20, "put_fmode %p fmode %d %d -> %d\n", inode, fmode,
+ dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
BUG_ON(ci->i_nr_by_mode[fmode] == 0);
if (--ci->i_nr_by_mode[fmode] == 0)
struct ceph_mds_request_release *rel = *p;
int ret = 0;
- dout(10, "encode_inode_release %p mds%d drop %s unless %s\n", inode,
+ dout("encode_inode_release %p mds%d drop %s unless %s\n", inode,
mds, ceph_cap_string(drop), ceph_cap_string(unless));
spin_lock(&inode->i_lock);
(cap->issued & unless) == 0)) {
if ((cap->issued & drop) &&
(cap->issued & unless) == 0) {
- dout(10, "encode_inode_release %p cap %p %s -> "
+ dout("encode_inode_release %p cap %p %s -> "
"%s\n", inode, cap,
ceph_cap_string(cap->issued),
ceph_cap_string(cap->issued & ~drop));
cap->implemented &= ~drop;
if (ci->i_ceph_flags & CEPH_I_NODELAY) {
int wanted = __ceph_caps_wanted(ci);
- dout(10, " wanted %s -> %s (act %s)\n",
+ dout(" wanted %s -> %s (act %s)\n",
ceph_cap_string(cap->mds_wanted),
ceph_cap_string(cap->mds_wanted &
~wanted),
cap->mds_wanted &= wanted;
}
} else {
- dout(10, "encode_inode_release %p cap %p %s"
+ dout("encode_inode_release %p cap %p %s"
" (force)\n", inode, cap,
ceph_cap_string(cap->issued));
}
*p += sizeof(*rel);
ret = 1;
} else {
- dout(10, "encode_inode_release %p cap %p %s\n",
+ dout("encode_inode_release %p cap %p %s\n",
inode, cap, ceph_cap_string(cap->issued));
}
}
/* drop dentry lease too? */
spin_lock(&dentry->d_lock);
if (ret && di->lease_session && di->lease_session->s_mds == mds) {
- dout(10, "encode_dentry_release %p mds%d seq %d\n",
+ dout("encode_dentry_release %p mds%d seq %d\n",
dentry, mds, (int)di->lease_seq);
rel->dname_len = cpu_to_le32(dentry->d_name.len);
memcpy(*p, dentry->d_name.name, dentry->d_name.len);
valid.)
-Debugging options (these are also changeable via debugfs):
-
- debug=N
- Specify the level of debug output for the Ceph client. Larger
- values mean more output, and range from 0 to 50. The default
- is 1 (high-level informational messages only).
-
- debug_console=N
- If non-zero, debug messages will be printk'ed with KERN_ERR,
- causing them to appear on the system console. Otherwise,
- messages will be printed with KERN_DEBUG and will appear in
- the system log.
-
- debug_msgr=N
- Debug level for the messaging/communications layer, if >= 0.
- Default is -1.
-
- debug_mdsc=N
- Debug level for the MDS client, if >= 0.
-
- debug_osdc=N
- Debug level for the OSD client, if >= 0.
-
- debug_addr=N
- Debug level for address space operations, if >= 0.
-
- debug_file=N
- Debug level for file operations, if >= 0.
-
- debug_inode=N
- Debug level for inode operations, if >= 0.
-
- debug_caps=N
- Debug level for file capability operations, if >= 0.
-
- debug_snap=N
- Debug level for snapshot operations, if >= 0.
-
-
-
-
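+
+Debug output is emitted through the kernel's pr_debug() facility.  If
+the kernel is built with CONFIG_DYNAMIC_DEBUG, messages can be enabled
+at runtime per file, module, or callsite; for example, assuming the
+client is built as the 'ceph' module and debugfs is mounted at
+/sys/kernel/debug:
+
+  echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control
+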
More Information
================
#include <linux/string.h>
-extern int ceph_debug __read_mostly; /* debug level. */
-extern int ceph_debug_console __read_mostly; /* send debug output to console? */
-extern int ceph_debug_mask __read_mostly;
-
-/*
- * different debug levels for different modules. These default to -1.
- * If they are >= 0, then they override the global ceph_debug value.
- */
-extern int ceph_debug_addr __read_mostly;
-extern int ceph_debug_caps __read_mostly;
-extern int ceph_debug_dir __read_mostly;
-extern int ceph_debug_export __read_mostly;
-extern int ceph_debug_file __read_mostly;
-extern int ceph_debug_inode __read_mostly;
-extern int ceph_debug_ioctl __read_mostly;
-extern int ceph_debug_mdsc __read_mostly;
-extern int ceph_debug_mdsmap __read_mostly;
-extern int ceph_debug_msgr __read_mostly;
-extern int ceph_debug_mon __read_mostly;
-extern int ceph_debug_osdc __read_mostly;
-extern int ceph_debug_osdmap __read_mostly;
-extern int ceph_debug_snap __read_mostly;
-extern int ceph_debug_super __read_mostly;
-extern int ceph_debug_protocol __read_mostly;
-extern int ceph_debug_proc __read_mostly;
-extern int ceph_debug_tools __read_mostly;
-
-#define DOUT_MASK_ADDR 0x00000001
-#define DOUT_MASK_CAPS 0x00000002
-#define DOUT_MASK_DIR 0x00000004
-#define DOUT_MASK_EXPORT 0x00000008
-#define DOUT_MASK_FILE 0x00000010
-#define DOUT_MASK_INODE 0x00000020
-#define DOUT_MASK_IOCTL 0x00000040
-#define DOUT_MASK_MDSC 0x00000080
-#define DOUT_MASK_MDSMAP 0x00000100
-#define DOUT_MASK_MSGR 0x00000200
-#define DOUT_MASK_MON 0x00000400
-#define DOUT_MASK_OSDC 0x00000800
-#define DOUT_MASK_OSDMAP 0x00001000
-#define DOUT_MASK_SNAP 0x00002000
-#define DOUT_MASK_SUPER 0x00004000
-#define DOUT_MASK_PROTOCOL 0x00008000
-#define DOUT_MASK_PROC 0x00010000
-#define DOUT_MASK_TOOLS 0x00020000
-
-#define DOUT_UNMASKABLE 0x80000000
-
#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)
-#define FMT_PREFIX "%-30.30s: "
+#define FMT_PREFIX "%-26.26s: "
#define FMT_SUFFIX "%s"
#define LOG_ARGS __FILE__ ":" STRINGIFY(__LINE__)
#define TRAIL_PARAM ""
#define LOG_LINE FMT_PREFIX fmt, LOG_ARGS, args
-#define dout_flag(x, mask, fmt, args...) do { \
- if (((ceph_debug_mask | DOUT_UNMASKABLE) & mask) && \
- ((DOUT_VAR >= 0 && (x) <= DOUT_VAR) || \
- (DOUT_VAR < 0 && (x) <= ceph_debug))) { \
- if (ceph_debug_console) \
- printk(KERN_ERR FMT_PREFIX fmt, LOG_ARGS, \
- args); \
- else \
- printk(KERN_DEBUG FMT_PREFIX fmt, LOG_ARGS, \
- args); \
- } \
- } while (0)
-
-#define _dout(x, fmt, args...) dout_flag((x), DOUT_MASK, fmt FMT_SUFFIX, args)
-
-#define dout(x, args...) _dout((x), args, TRAIL_PARAM)
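+/*
+ * dout() funnels all debug output through pr_debug().  When the kernel
+ * is built with CONFIG_DYNAMIC_DEBUG, these messages can be enabled per
+ * file or per callsite at runtime via <debugfs>/dynamic_debug/control;
+ * otherwise pr_debug() is compiled out unless DEBUG is defined.
+ */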
+#define _dout(fmt, args...) pr_debug(FMT_PREFIX fmt FMT_SUFFIX, LOG_ARGS, args)
+#define dout(args...) _dout(args, TRAIL_PARAM)
#endif
#include "mds_client.h"
static struct dentry *ceph_debugfs_dir;
-static struct dentry *ceph_debugfs_debug;
-static struct dentry *ceph_debugfs_debug_msgr;
-static struct dentry *ceph_debugfs_debug_console;
-static struct dentry *ceph_debugfs_debug_mask;
static struct dentry *ceph_debugfs_caps_reservation;
-/*
- * ceph_debug_mask
- */
-struct _debug_mask_name {
- int mask;
- char *name;
-};
-
-static struct _debug_mask_name _debug_mask_names[] = {
- {DOUT_MASK_ADDR, "addr"},
- {DOUT_MASK_CAPS, "caps"},
- {DOUT_MASK_DIR, "dir"},
- {DOUT_MASK_EXPORT, "export"},
- {DOUT_MASK_FILE, "file"},
- {DOUT_MASK_INODE, "inode"},
- {DOUT_MASK_IOCTL, "ioctl"},
- {DOUT_MASK_MDSC, "mdsc"},
- {DOUT_MASK_MDSMAP, "mdsmap"},
- {DOUT_MASK_MSGR, "msgr"},
- {DOUT_MASK_MON, "mon"},
- {DOUT_MASK_OSDC, "osdc"},
- {DOUT_MASK_OSDMAP, "osdmap"},
- {DOUT_MASK_SNAP, "snap"},
- {DOUT_MASK_SUPER, "super"},
- {DOUT_MASK_PROTOCOL, "protocol"},
- {DOUT_MASK_PROC, "proc"},
- {DOUT_MASK_TOOLS, "tools"},
- {0, NULL}
-};
-
-static int debug_mask_show(struct seq_file *s, void *p)
-{
- int i = 0;
- seq_printf(s, "0x%x", ceph_debug_mask);
-
- while (_debug_mask_names[i].mask) {
- if (ceph_debug_mask & _debug_mask_names[i].mask)
- seq_printf(s, " %s",
- _debug_mask_names[i].name);
- i++;
- }
- seq_printf(s, "\n");
- return 0;
-}
-
-static int get_debug_mask(const char *name, int len)
-{
- int i = 0;
-
- while (_debug_mask_names[i].name) {
- if (strncmp(_debug_mask_names[i].name, name, len) == 0)
- return _debug_mask_names[i].mask;
- i++;
- }
- return 0;
-}
-
-static ssize_t debug_mask_store(struct file *file, const char __user *buffer,
- size_t count, loff_t *data)
-{
- char *next, *tok;
- char *buf;
-
- if (count > PAGE_SIZE)
- return -EINVAL;
-
- buf = kmalloc(count + 1, GFP_KERNEL);
-
- if (copy_from_user(buf, buffer, count))
- return -EFAULT;
-
- buf[count] = '\0';
-
- next = buf;
-
- while (1) {
- tok = next;
- next = strpbrk(tok, " \t\r\n");
- if (!next)
- break;
- if (isdigit(*tok)) {
- ceph_debug_mask = simple_strtol(tok, NULL, 0);
- } else {
- int remove = 0;
- int mask;
-
- if (*tok == '-') {
- remove = 1;
- tok++;
- } else if (*tok == '+')
- tok++;
- mask = get_debug_mask(tok, next-tok);
- if (mask) {
- if (remove)
- ceph_debug_mask &= ~mask;
- else
- ceph_debug_mask |= mask;
- }
- }
- next++;
- }
-
- kfree(buf);
-
- return count;
-}
-
-static int debug_mask_open(struct inode *inode, struct file *file)
-{
- return single_open(file, debug_mask_show, NULL);
-}
-
-static const struct file_operations ceph_debug_mask_fops = {
- .open = debug_mask_open,
- .read = seq_read,
- .write = debug_mask_store,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int fsid_show(struct seq_file *s, void *p)
{
struct ceph_client *client = s->private;
if (!ceph_debugfs_dir)
goto out;
- ceph_debugfs_debug = debugfs_create_u32("debug",
- 0600,
- ceph_debugfs_dir,
- (u32 *)&ceph_debug);
- if (!ceph_debugfs_debug)
- goto out;
-
- ceph_debugfs_debug_msgr = debugfs_create_u32("msgr",
- 0600,
- ceph_debugfs_dir,
- (u32 *)&ceph_debug_msgr);
- if (!ceph_debugfs_debug_msgr)
- goto out;
-
- ceph_debugfs_debug_console = debugfs_create_u32("console",
- 0600,
- ceph_debugfs_dir,
- (u32 *)&ceph_debug_console);
- if (!ceph_debugfs_debug_console)
- goto out;
-
- ceph_debugfs_debug_mask = debugfs_create_file("mask",
- 0600,
- ceph_debugfs_dir,
- NULL,
- &ceph_debug_mask_fops);
- if (!ceph_debugfs_debug_mask)
- goto out;
-
ceph_debugfs_caps_reservation = debugfs_create_file("caps_reservation",
0400,
ceph_debugfs_dir,
void ceph_debugfs_cleanup(void)
{
debugfs_remove(ceph_debugfs_caps_reservation);
- debugfs_remove(ceph_debugfs_debug_console);
- debugfs_remove(ceph_debugfs_debug_mask);
- debugfs_remove(ceph_debugfs_debug_msgr);
- debugfs_remove(ceph_debugfs_debug);
debugfs_remove(ceph_debugfs_dir);
}
#include <linux/sched.h>
#include "ceph_debug.h"
-
-int ceph_debug_dir __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_DIR
-#define DOUT_VAR ceph_debug_dir
#include "super.h"
/*
last = fi->dentry;
fi->dentry = NULL;
- dout(10, "__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
+ dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
last);
spin_lock(&dcache_lock);
if (list_empty(&parent->d_subdirs))
goto out_unlock;
p = parent->d_subdirs.prev;
- dout(10, " initial p %p/%p\n", p->prev, p->next);
+ dout(" initial p %p/%p\n", p->prev, p->next);
} else {
p = &last->d_u.d_child;
}
dentry = list_entry(p, struct dentry, d_u.d_child);
di = ceph_dentry(dentry);
while (1) {
- dout(10, " p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
+ dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
parent->d_subdirs.prev, parent->d_subdirs.next);
if (p == &parent->d_subdirs) {
fi->at_end = 1;
if (!d_unhashed(dentry) && dentry->d_inode &&
filp->f_pos <= di->offset)
break;
- dout(10, " skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
+ dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
dentry->d_name.len, dentry->d_name.name, di->offset,
filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
!dentry->d_inode ? " null" : "");
last = NULL;
}
- dout(10, " %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
filp->f_pos = di->offset;
err = filldir(dirent, dentry->d_name.name,
/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
goto more;
- dout(20, " lost I_COMPLETE on %p; falling back to mds\n", dir);
+ dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
err = -EAGAIN;
out_unlock:
int len;
const int max_entries = client->mount_args.max_readdir;
- dout(5, "readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
+ dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
if (fi->at_end)
return 0;
/* note dir version at start of readdir */
fi->dir_release_count = ci->i_release_count;
- dout(10, "readdir off 0 -> '.'\n");
+ dout("readdir off 0 -> '.'\n");
if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
inode->i_ino, inode->i_mode >> 12) < 0)
return 0;
off = 1;
}
if (filp->f_pos == 1) {
- dout(10, "readdir off 1 -> '..'\n");
+ dout("readdir off 1 -> '..'\n");
if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
filp->f_dentry->d_parent->d_inode->i_ino,
inode->i_mode >> 12) < 0)
/* requery frag tree, as the frag topology may have changed */
frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
- dout(10, "readdir fetching %llx.%llx frag %x offset '%s'\n",
+ dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
ceph_vinop(inode), frag, fi->last_name);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
ceph_mdsc_put_request(req);
return err;
}
- dout(10, "readdir got and parsed readdir result=%d"
+ dout("readdir got and parsed readdir result=%d"
" on frag %x, end=%d, complete=%d\n", err, frag,
(int)req->r_reply_info.dir_end,
(int)req->r_reply_info.dir_complete);
if (!req->r_did_prepopulate) {
- dout(10, "readdir !did_prepopulate");
+ dout("readdir !did_prepopulate");
fi->dir_release_count--;
}
len);
fi->last_name[len] = 0;
fi->next_offset += rinfo->dir_nr;
- dout(10, "readdir last item is '%s'\n", fi->last_name);
+ dout("readdir last item is '%s'\n", fi->last_name);
}
fi->last_readdir = req;
}
rinfo = &fi->last_readdir->r_reply_info;
- dout(10, "readdir frag %x num %d off %d chunkoff %d\n", frag,
+ dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
rinfo->dir_nr, off, fi->offset);
while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
u64 pos = ceph_make_fpos(frag, off);
struct ceph_mds_reply_inode *in =
rinfo->dir_in[off - fi->offset].in;
- dout(10, "readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
+ dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
off, off - fi->offset, rinfo->dir_nr, pos,
rinfo->dir_dname_len[off - fi->offset],
rinfo->dir_dname[off - fi->offset], in);
pos,
le64_to_cpu(in->ino),
ftype) < 0) {
- dout(20, "filldir stopping us...\n");
+ dout("filldir stopping us...\n");
return 0;
}
off++;
frag = frag_next(frag);
off = 0;
filp->f_pos = ceph_make_fpos(frag, off);
- dout(10, "readdir next frag is %x\n", frag);
+ dout("readdir next frag is %x\n", frag);
goto more;
}
fi->at_end = 1;
*/
spin_lock(&inode->i_lock);
if (ci->i_release_count == fi->dir_release_count) {
- dout(10, " marking %p complete\n", inode);
+ dout(" marking %p complete\n", inode);
ci->i_ceph_flags |= CEPH_I_COMPLETE;
ci->i_max_offset = filp->f_pos;
}
spin_unlock(&inode->i_lock);
- dout(20, "readdir %p filp %p done.\n", inode, filp);
+ dout("readdir %p filp %p done.\n", inode, filp);
return 0;
}
if (offset == 0 ||
fpos_frag(offset) != fpos_frag(old_offset) ||
fpos_off(offset) < fi->offset) {
- dout(10, "dir_llseek dropping %p content\n", file);
+ dout("dir_llseek dropping %p content\n", file);
reset_readdir(fi);
}
ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
strcmp(dentry->d_name.name, client->mount_args.snapdir_name) == 0) {
struct inode *inode = ceph_get_snapdir(parent);
- dout(10, "ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
+ dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
dentry, dentry->d_name.len, dentry->d_name.name, inode);
d_add(dentry, inode);
err = 0;
/* no trace? */
err = 0;
if (!req->r_reply_info.head->is_dentry) {
- dout(20, "ENOENT and no trace, dentry %p inode %p\n",
+ dout("ENOENT and no trace, dentry %p inode %p\n",
dentry, dentry->d_inode);
if (dentry->d_inode) {
d_drop(dentry);
int op;
int err;
- dout(5, "lookup %p dentry %p '%.*s'\n",
+ dout("lookup %p dentry %p '%.*s'\n",
dir, dentry, dentry->d_name.len, dentry->d_name.name);
if (dentry->d_name.len > NAME_MAX)
struct ceph_dentry_info *di = ceph_dentry(dentry);
spin_lock(&dir->i_lock);
- dout(40, " dir %p flags are %d\n", dir, ci->i_ceph_flags);
+ dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
client->mount_args.snapdir_name,
dentry->d_name.len) &&
(__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
di->offset = ci->i_max_offset++;
spin_unlock(&dir->i_lock);
- dout(10, " dir %p complete, -ENOENT\n", dir);
+ dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
di->lease_rdcache_gen = ci->i_rdcache_gen;
return NULL;
err = ceph_mdsc_do_request(mdsc, NULL, req);
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
- dout(20, "lookup result=%p\n", dentry);
+ dout("lookup result=%p\n", dentry);
return dentry;
}
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
- dout(5, "mknod in dir %p dentry %p mode 0%o rdev %d\n",
+ dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
dir, dentry, mode, rdev);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
if (IS_ERR(req)) {
static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
struct nameidata *nd)
{
- dout(5, "create in dir %p dentry %p name '%.*s'\n",
+ dout("create in dir %p dentry %p name '%.*s'\n",
dir, dentry, dentry->d_name.len, dentry->d_name.name);
if (ceph_snap(dir) != CEPH_NOSNAP)
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
- dout(5, "symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
+ dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
d_drop(dentry);
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
op = CEPH_MDS_OP_MKSNAP;
- dout(5, "mksnap dir %p snap '%.*s' dn %p\n", dir,
+ dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
dentry->d_name.len, dentry->d_name.name, dentry);
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
- dout(5, "mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
+ dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
goto out;
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
- dout(5, "link in dir %p old_dentry %p dentry %p\n", dir,
+ dout("link in dir %p old_dentry %p dentry %p\n", dir,
old_dentry, dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* rmdir .snap/foo is RMSNAP */
- dout(5, "rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
+ dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
dentry->d_name.name, dentry);
op = CEPH_MDS_OP_RMSNAP;
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
- dout(5, "unlink/rmdir dir %p dn %p inode %p\n",
+ dout("unlink/rmdir dir %p dn %p inode %p\n",
dir, dentry, inode);
op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
if (ceph_snap(old_dir) != CEPH_NOSNAP ||
ceph_snap(new_dir) != CEPH_NOSNAP)
return -EROFS;
- dout(5, "rename dir %p dentry %p to dir %p dentry %p\n",
+ dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
if (IS_ERR(req))
if (mds >= 0)
ceph_mdsc_lease_send_msg(&ceph_client(dentry->d_sb)->mdsc,
mds, dir, dentry, CEPH_MDS_LEASE_RENEW, seq);
- dout(20, "dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
+ dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
return valid;
}
if (ci->i_rdcache_gen == di->lease_rdcache_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
spin_unlock(&dir->i_lock);
- dout(20, "dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
+ dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
dir, (unsigned)ci->i_rdcache_gen, dentry,
(unsigned)di->lease_rdcache_gen, valid);
return valid;
{
struct inode *dir = dentry->d_parent->d_inode;
- dout(10, "d_revalidate %p '%.*s' inode %p\n", dentry,
+ dout("d_revalidate %p '%.*s' inode %p\n", dentry,
dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
- dout(10, "d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
+ dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
goto out_touch;
}
if (dir_lease_is_valid(dir, dentry))
goto out_touch;
- dout(20, "dentry_revalidate %p invalid\n", dentry);
+ dout("dentry_revalidate %p invalid\n", dentry);
d_drop(dentry);
return 0;
out_touch:
spin_lock(&parent_inode->i_lock);
if (ci->i_rdcache_gen == di->lease_rdcache_gen) {
- dout(10, " clearing %p complete (d_release)\n",
+ dout(" clearing %p complete (d_release)\n",
parent_inode);
ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
ci->i_release_count++;
u64 last_tid;
int ret = 0;
- dout(10, "dir_fsync %p\n", inode);
+ dout("dir_fsync %p\n", inode);
spin_lock(&ci->i_unsafe_lock);
if (list_empty(head))
goto out;
do {
ceph_mdsc_get_request(req);
spin_unlock(&ci->i_unsafe_lock);
- dout(10, "dir_fsync %p wait on tid %llu (until %llu)\n",
+ dout("dir_fsync %p wait on tid %llu (until %llu)\n",
inode, req->r_tid, last_tid);
if (req->r_timeout) {
ret = wait_for_completion_timeout(&req->r_safe_completion,
{
struct ceph_dentry_info *di = ceph_dentry(dn);
struct ceph_mds_client *mdsc;
- dout(30, "dentry_lru_add %p %p\t%.*s\n",
+ dout("dentry_lru_add %p %p\t%.*s\n",
di, dn, dn->d_name.len, dn->d_name.name);
if (di) {
{
struct ceph_dentry_info *di = ceph_dentry(dn);
struct ceph_mds_client *mdsc;
- dout(30, "dentry_lru_touch %p %p\t%.*s\n",
+ dout("dentry_lru_touch %p %p\t%.*s\n",
di, dn, dn->d_name.len, dn->d_name.name);
if (di) {
struct ceph_dentry_info *di = ceph_dentry(dn);
struct ceph_mds_client *mdsc;
- dout(30, "dentry_lru_del %p %p\t%.*s\n",
+ dout("dentry_lru_del %p %p\t%.*s\n",
di, dn, dn->d_name.len, dn->d_name.name);
if (di) {
mdsc = &ceph_client(dn->d_sb)->mdsc;
#include "super.h"
#include "ceph_debug.h"
-int ceph_debug_export __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_EXPORT
-#define DOUT_VAR ceph_debug_export
-
/*
* fh is N tuples of
* <ino, parent's d_name.hash>
int len;
struct dentry *d_parent;
- dout(10, "encode_fh %p max_len %d u32s (%d export items)%s\n", dentry,
+ dout("encode_fh %p max_len %d u32s (%d export items)%s\n", dentry,
*max_len, max, connectable ? " connectable" : "");
if (max < 1 || (connectable && max < 2))
iput(inode);
return ERR_PTR(err);
}
- dout(10, "fh_to_dentry %llx.%x -- inode %p dentry %p\n", fh->ino.ino,
+ dout("fh_to_dentry %llx.%x -- inode %p dentry %p\n", fh->ino.ino,
hash, inode, dentry);
return dentry;
#include <linux/sched.h>
#include <linux/file.h>
+#include <linux/namei.h>
#include <linux/writeback.h>
#include "ceph_debug.h"
-
-int ceph_debug_file __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_FILE
-#define DOUT_VAR ceph_debug_file
#include "super.h"
-
#include "mds_client.h"
-#include <linux/namei.h>
-
/*
* Prepare an open request. Preallocate ceph_cap to avoid an
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
case S_IFDIR:
- dout(20, "init_file %p %p 0%o (regular)\n", inode, file,
+ dout("init_file %p %p 0%o (regular)\n", inode, file,
inode->i_mode);
cf = kzalloc(sizeof(*cf), GFP_NOFS);
if (cf == NULL) {
break;
case S_IFLNK:
- dout(20, "init_file %p %p 0%o (symlink)\n", inode, file,
+ dout("init_file %p %p 0%o (symlink)\n", inode, file,
inode->i_mode);
ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
break;
default:
- dout(20, "init_file %p %p 0%o (special)\n", inode, file,
+ dout("init_file %p %p 0%o (special)\n", inode, file,
inode->i_mode);
/*
* we need to drop the open ref now, since we don't
int flags, fmode, wanted;
if (cf) {
- dout(5, "open file %p is already opened\n", file);
+ dout("open file %p is already opened\n", file);
return 0;
}
if (S_ISDIR(inode->i_mode))
flags = O_DIRECTORY; /* mds likes to know */
- dout(5, "open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
+ dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
ceph_vinop(inode), file, flags, file->f_flags);
fmode = ceph_flags_to_mode(flags);
wanted = ceph_caps_for_mode(fmode);
int mds_wanted = __ceph_caps_mds_wanted(ci);
int issued = __ceph_caps_issued(ci, NULL);
- dout(10, "open %p fmode %d want %s issued %s using existing\n",
+ dout("open %p fmode %d want %s issued %s using existing\n",
inode, fmode, ceph_cap_string(wanted),
ceph_cap_string(issued));
__ceph_get_fmode(ci, fmode);
}
spin_unlock(&inode->i_lock);
- dout(10, "open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
+ dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
req = prepare_open_request(inode->i_sb, flags, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
if (!err)
err = ceph_init_file(inode, file, req->r_fmode);
ceph_mdsc_put_request(req);
- dout(5, "open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
+ dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
return err;
}
int err;
int flags = nd->intent.open.flags - 1; /* silly vfs! */
- dout(5, "ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
+ dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);
/* do the open */
err = ceph_init_file(req->r_dentry->d_inode, file,
req->r_fmode);
ceph_mdsc_put_request(req);
- dout(5, "ceph_lookup_open result=%p\n", dentry);
+ dout("ceph_lookup_open result=%p\n", dentry);
return dentry;
}
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *cf = file->private_data;
- dout(5, "release inode %p file %p\n", inode, file);
+ dout("release inode %p file %p\n", inode, file);
ceph_put_fmode(ci, cf->fmode);
if (cf->last_readdir)
ceph_mdsc_put_request(cf->last_readdir);
int read = 0;
int ret;
- dout(10, "sync_read on file %p %llu~%u %s\n", file, start_off, left,
+ dout("sync_read on file %p %llu~%u %s\n", file, start_off, left,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
if (file->f_flags & O_DIRECT) {
{
struct ceph_inode_info *ci = ceph_inode(req->r_inode);
- dout(10, "sync_write_commit %p tid %llu\n", req, req->r_tid);
+ dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
spin_lock(&ci->i_unsafe_lock);
list_del_init(&req->r_unsafe_item);
spin_unlock(&ci->i_unsafe_lock);
do {
ceph_osdc_get_request(req);
spin_unlock(&ci->i_unsafe_lock);
- dout(10, "sync_write_wait on tid %llu (until %llu)\n",
+ dout("sync_write_wait on tid %llu (until %llu)\n",
req->r_tid, last_tid);
wait_for_completion(&req->r_safe_completion);
spin_lock(&ci->i_unsafe_lock);
if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
return -EROFS;
- dout(10, "sync_write on file %p %lld~%u %s\n", file, *offset,
+ dout("sync_write on file %p %lld~%u %s\n", file, *offset,
(unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
if (file->f_flags & O_APPEND)
ssize_t ret;
int got = 0;
- dout(10, "aio_read %llx.%llx %llu~%u trying to get caps on %p\n",
+ dout("aio_read %llx.%llx %llu~%u trying to get caps on %p\n",
ceph_vinop(inode), pos, (unsigned)len, inode);
__ceph_do_pending_vmtruncate(inode);
ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
&got, -1);
if (ret < 0)
goto out;
- dout(10, "aio_read %llx.%llx %llu~%u got cap refs on %s\n",
+ dout("aio_read %llx.%llx %llu~%u got cap refs on %s\n",
ceph_vinop(inode), pos, (unsigned)len, ceph_cap_string(got));
if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
out:
- dout(10, "aio_read %llx.%llx dropping cap refs on %s\n",
+ dout("aio_read %llx.%llx dropping cap refs on %s\n",
ceph_vinop(inode), ceph_cap_string(got));
ceph_put_cap_refs(ci, got);
return ret;
if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
return -ENOSPC;
__ceph_do_pending_vmtruncate(inode);
- dout(10, "aio_write %p %llu~%u getting caps. i_size %llu\n",
+ dout("aio_write %p %llu~%u getting caps. i_size %llu\n",
inode, pos, (unsigned)iov->iov_len, inode->i_size);
ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
&got, endoff);
if (ret < 0)
goto out;
- dout(10, "aio_write %p %llu~%u got cap refs on %s\n",
+ dout("aio_write %p %llu~%u got cap refs on %s\n",
inode, pos, (unsigned)iov->iov_len, ceph_cap_string(got));
if ((got & CEPH_CAP_FILE_BUFFER) == 0 ||
}
out:
- dout(10, "aio_write %p %llu~%u dropping cap refs on %s\n",
+ dout("aio_write %p %llu~%u dropping cap refs on %s\n",
inode, pos, (unsigned)iov->iov_len, ceph_cap_string(got));
ceph_put_cap_refs(ci, got);
if (ret == -EOLDSNAPC) {
- dout(10, "aio_write %p %llu~%u got EOLDSNAPC, retrying\n",
+ dout("aio_write %p %llu~%u got EOLDSNAPC, retrying\n",
inode, pos, (unsigned)iov->iov_len);
goto retry_snap;
}
struct inode *inode = dentry->d_inode;
int ret;
- dout(10, "fsync %p\n", inode);
+ dout("fsync %p\n", inode);
sync_write_wait(inode);
ret = filemap_write_and_wait(inode->i_mapping);
#include <linux/writeback.h>
#include "ceph_debug.h"
-
-int ceph_debug_inode __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_INODE
-#define DOUT_VAR ceph_debug_inode
#include "super.h"
#include "decode.h"
if (inode == NULL)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
- dout(40, "get_inode created new inode %p %llx.%llx ino %llx\n",
+ dout("get_inode created new inode %p %llx.%llx ino %llx\n",
inode, ceph_vinop(inode), (u64)inode->i_ino);
unlock_new_inode(inode);
}
- dout(30, "get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
+ dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
vino.snap, inode);
return inode;
}
rb_link_node(&frag->node, parent, p);
rb_insert_color(&frag->node, &ci->i_fragtree);
- dout(20, "get_or_create_frag added %llx.%llx frag %x\n",
+ dout("get_or_create_frag added %llx.%llx frag %x\n",
ceph_vinop(&ci->vfs_inode), f);
return frag;
/* choose child */
nway = 1 << frag->split_by;
- dout(30, "choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
+ dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
frag->split_by, nway);
for (i = 0; i < nway; i++) {
n = frag_make_child(t, frag->split_by, i);
}
BUG_ON(i == nway);
}
- dout(30, "choose_frag(%x) = %x\n", v, t);
+ dout("choose_frag(%x) = %x\n", v, t);
mutex_unlock(&ci->i_fragtree_mutex);
return t;
goto out;
if (frag->split_by == 0) {
/* tree leaf, remove */
- dout(20, "fill_dirfrag removed %llx.%llx frag %x"
+ dout("fill_dirfrag removed %llx.%llx frag %x"
" (no ref)\n", ceph_vinop(inode), id);
rb_erase(&frag->node, &ci->i_fragtree);
kfree(frag);
} else {
/* tree branch, keep and clear */
- dout(20, "fill_dirfrag cleared %llx.%llx frag %x"
+ dout("fill_dirfrag cleared %llx.%llx frag %x"
" referral\n", ceph_vinop(inode), id);
frag->mds = -1;
frag->ndist = 0;
frag->ndist = min_t(u32, ndist, MAX_DIRFRAG_REP);
for (i = 0; i < frag->ndist; i++)
frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
- dout(20, "fill_dirfrag %llx.%llx frag %x referral mds %d ndist=%d\n",
+ dout("fill_dirfrag %llx.%llx frag %x referral mds %d ndist=%d\n",
ceph_vinop(inode), frag->frag, frag->mds, frag->ndist);
out:
if (!ci)
return NULL;
- dout(10, "alloc_inode %p\n", &ci->vfs_inode);
+ dout("alloc_inode %p\n", &ci->vfs_inode);
ci->i_version = 0;
ci->i_time_warp_seq = 0;
struct ceph_inode_frag *frag;
struct rb_node *n;
- dout(30, "destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+ dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
ceph_queue_caps_release(inode);
if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
(truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
- dout(10, "size %lld -> %llu\n", inode->i_size, size);
+ dout("size %lld -> %llu\n", inode->i_size, size);
inode->i_size = size;
inode->i_blocks = (size + (1<<9) - 1) >> 9;
ci->i_reported_size = size;
if (truncate_seq != ci->i_truncate_seq) {
- dout(10, "truncate_seq %u -> %u\n",
+ dout("truncate_seq %u -> %u\n",
ci->i_truncate_seq, truncate_seq);
ci->i_truncate_seq = truncate_seq;
if (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
}
if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
ci->i_truncate_size != truncate_size) {
- dout(10, "truncate_size %lld -> %llu\n", ci->i_truncate_size,
+ dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
truncate_size);
ci->i_truncate_size = truncate_size;
}
CEPH_CAP_FILE_WR|
CEPH_CAP_FILE_BUFFER)) {
if (timespec_compare(ctime, &inode->i_ctime) > 0) {
- dout(20, "ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
+ dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
ctime->tv_sec, ctime->tv_nsec);
inode->i_ctime = *ctime;
}
if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
/* the MDS did a utimes() */
- dout(20, "mtime %ld.%09ld -> %ld.%09ld "
+ dout("mtime %ld.%09ld -> %ld.%09ld "
"tw %d -> %d\n",
inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
mtime->tv_sec, mtime->tv_nsec,
} else if (time_warp_seq == ci->i_time_warp_seq) {
/* nobody did utimes(); take the max */
if (timespec_compare(mtime, &inode->i_mtime) > 0) {
- dout(20, "mtime %ld.%09ld -> %ld.%09ld inc\n",
+ dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
inode->i_mtime.tv_sec,
inode->i_mtime.tv_nsec,
mtime->tv_sec, mtime->tv_nsec);
inode->i_mtime = *mtime;
}
if (timespec_compare(atime, &inode->i_atime) > 0) {
- dout(20, "atime %ld.%09ld -> %ld.%09ld inc\n",
+ dout("atime %ld.%09ld -> %ld.%09ld inc\n",
inode->i_atime.tv_sec,
inode->i_atime.tv_nsec,
atime->tv_sec, atime->tv_nsec);
}
}
if (warn) /* time_warp_seq shouldn't go backwards */
- dout(10, "%p mds time_warp_seq %llu < %u\n",
+ dout("%p mds time_warp_seq %llu < %u\n",
inode, time_warp_seq, ci->i_time_warp_seq);
}
int err = 0;
int queue_trunc = 0;
- dout(30, "fill_inode %p ino %llx.%llx v %llu had %llu\n",
+ dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
inode, ceph_vinop(inode), le64_to_cpu(info->version),
ci->i_version);
inode->i_mode = le32_to_cpu(info->mode);
inode->i_uid = le32_to_cpu(info->uid);
inode->i_gid = le32_to_cpu(info->gid);
- dout(20, "%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
+ dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
inode->i_uid, inode->i_gid);
}
if (IS_ERR(frag))
continue;
frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
- dout(20, " frag %x split by %d\n", frag->frag, frag->split_by);
+ dout(" frag %x split by %d\n", frag->frag, frag->split_by);
}
mutex_unlock(&ci->i_fragtree_mutex);
caps_reservation);
} else {
spin_lock(&inode->i_lock);
- dout(20, " %p got snap_caps %s\n", inode,
+ dout(" %p got snap_caps %s\n", inode,
ceph_cap_string(le32_to_cpu(info->cap.caps)));
ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
if (cap_fmode >= 0)
if (ci->i_files == 0 && ci->i_subdirs == 0 &&
ceph_snap(inode) == CEPH_NOSNAP &&
(le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) {
- dout(10, " marking %p complete (empty)\n", inode);
+ dout(" marking %p complete (empty)\n", inode);
ci->i_ceph_flags |= CEPH_I_COMPLETE;
ci->i_max_offset = 2;
}
return;
spin_lock(&dentry->d_lock);
- dout(10, "update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
+ dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
dentry, le16_to_cpu(lease->mask), duration, ttl);
/* make lease_rdcache_gen match directory */
dn = realdn; /* note realdn contains the error */
goto out;
} else if (realdn) {
- dout(10, "dn %p (%d) spliced with %p (%d) "
+ dout("dn %p (%d) spliced with %p (%d) "
"inode %p ino %llx.%llx\n",
dn, atomic_read(&dn->d_count),
realdn, atomic_read(&realdn->d_count),
} else {
BUG_ON(!ceph_dentry(dn));
- dout(10, "dn %p attached to %p ino %llx.%llx\n",
+ dout("dn %p attached to %p ino %llx.%llx\n",
dn, dn->d_inode, ceph_vinop(dn->d_inode));
}
if ((!prehash || *prehash) && d_unhashed(dn))
int i = 0;
int err = 0;
- dout(10, "fill_trace %p is_dentry %d is_target %d\n", req,
+ dout("fill_trace %p is_dentry %d is_target %d\n", req,
rinfo->head->is_dentry, rinfo->head->is_target);
#if 0
#endif
if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
- dout(10, "fill_trace reply is empty!\n");
+ dout("fill_trace reply is empty!\n");
if (rinfo->head->result == 0 && req->r_locked_dir) {
struct ceph_inode_info *ci =
ceph_inode(req->r_locked_dir);
- dout(10, " clearing %p complete (empty trace)\n",
+ dout(" clearing %p complete (empty trace)\n",
req->r_locked_dir);
ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
ci->i_release_count++;
CEPH_LOCK_DN);
if (!have_lease)
- dout(10, "fill_trace no dentry lease or dir cap\n");
+ dout("fill_trace no dentry lease or dir cap\n");
/* rename? */
if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
- dout(10, " src %p '%.*s' dst %p '%.*s'\n",
+ dout(" src %p '%.*s' dst %p '%.*s'\n",
req->r_old_dentry,
req->r_old_dentry->d_name.len,
req->r_old_dentry->d_name.name,
dn, dn->d_name.len, dn->d_name.name);
- dout(10, "fill_trace doing d_move %p -> %p\n",
+ dout("fill_trace doing d_move %p -> %p\n",
req->r_old_dentry, dn);
d_move(req->r_old_dentry, dn);
- dout(10, " src %p '%.*s' dst %p '%.*s'\n",
+ dout(" src %p '%.*s' dst %p '%.*s'\n",
req->r_old_dentry,
req->r_old_dentry->d_name.len,
req->r_old_dentry->d_name.name,
/* null dentry? */
if (!rinfo->head->is_target) {
- dout(10, "fill_trace null dentry\n");
+ dout("fill_trace null dentry\n");
if (dn->d_inode) {
- dout(20, "d_delete %p\n", dn);
+ dout("d_delete %p\n", dn);
d_delete(dn);
} else {
- dout(20, "d_instantiate %p NULL\n", dn);
+ dout("d_instantiate %p NULL\n", dn);
d_instantiate(dn, NULL);
if (have_lease && d_unhashed(dn))
d_rehash(dn);
ceph_snap(in) == vino.snap) {
igrab(in);
} else {
- dout(10, " %p links to %p %llx.%llx, not %llx.%llx\n",
+ dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
dn, in, ceph_ino(in), ceph_snap(in),
vino.ino, vino.snap);
have_lease = false;
if (have_lease)
update_dentry_lease(dn, rinfo->dlease, session,
req->r_request_started);
- dout(10, " final dn %p\n", dn);
+ dout(" final dn %p\n", dn);
i++;
} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
req->r_op == CEPH_MDS_OP_MKSNAP) {
d_delete(dn);
goto done;
}
- dout(10, " linking snapped dir %p to dn %p\n", in, dn);
+ dout(" linking snapped dir %p to dn %p\n", in, dn);
dn = splice_dentry(dn, in, NULL);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
}
done:
- dout(10, "fill_trace done err=%d\n", err);
+ dout("fill_trace done err=%d\n", err);
return err;
}
if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
snapdir = ceph_get_snapdir(parent->d_inode);
parent = d_find_alias(snapdir);
- dout(10, "readdir_prepopulate %d items under SNAPDIR dn %p\n",
+ dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
rinfo->dir_nr, parent);
} else {
- dout(10, "readdir_prepopulate %d items under dn %p\n",
+ dout("readdir_prepopulate %d items under dn %p\n",
rinfo->dir_nr, parent);
if (rinfo->dir_dir)
ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
retry_lookup:
dn = d_lookup(parent, &dname);
- dout(30, "d_lookup on parent=%p name=%.*s got %p\n",
+ dout("d_lookup on parent=%p name=%.*s got %p\n",
parent, dname.len, dname.name, dn);
if (!dn) {
dn = d_alloc(parent, &dname);
- dout(40, "d_alloc %p '%.*s' = %p\n", parent,
+ dout("d_alloc %p '%.*s' = %p\n", parent,
dname.len, dname.name, dn);
if (dn == NULL) {
- dout(30, "d_alloc badness\n");
+ dout("d_alloc badness\n");
err = -ENOMEM;
goto out;
}
} else if (dn->d_inode &&
(ceph_ino(dn->d_inode) != vino.ino ||
ceph_snap(dn->d_inode) != vino.snap)) {
- dout(10, " dn %p points to wrong inode %p\n",
+ dout(" dn %p points to wrong inode %p\n",
dn, dn->d_inode);
d_delete(dn);
dput(dn);
} else {
in = ceph_get_inode(parent->d_sb, vino);
if (in == NULL) {
- dout(30, "new_inode badness\n");
+ dout("new_inode badness\n");
d_delete(dn);
dput(dn);
err = -ENOMEM;
iput(snapdir);
dput(parent);
}
- dout(10, "readdir_prepopulate done\n");
+ dout("readdir_prepopulate done\n");
return err;
}
int ret = 0;
spin_lock(&inode->i_lock);
- dout(30, "set_size %p %llu -> %llu\n", inode, inode->i_size, size);
+ dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
inode->i_size = size;
inode->i_blocks = (size + (1 << 9) - 1) >> 9;
i_wb_work);
struct inode *inode = &ci->vfs_inode;
- dout(10, "writeback %p\n", inode);
+ dout("writeback %p\n", inode);
filemap_fdatawrite(&inode->i_data);
iput(inode);
}
int check = 0;
spin_lock(&inode->i_lock);
- dout(10, "invalidate_pages %p gen %d revoking %d\n", inode,
+ dout("invalidate_pages %p gen %d revoking %d\n", inode,
ci->i_rdcache_gen, ci->i_rdcache_revoking);
if (ci->i_rdcache_gen == 0 ||
ci->i_rdcache_revoking != ci->i_rdcache_gen) {
spin_lock(&inode->i_lock);
if (orig_gen == ci->i_rdcache_gen) {
- dout(10, "invalidate_pages %p gen %d successful\n", inode,
+ dout("invalidate_pages %p gen %d successful\n", inode,
ci->i_rdcache_gen);
ci->i_rdcache_gen = 0;
ci->i_rdcache_revoking = 0;
check = 1;
} else {
- dout(10, "invalidate_pages %p gen %d raced, gen now %d\n",
+ dout("invalidate_pages %p gen %d raced, gen now %d\n",
inode, orig_gen, ci->i_rdcache_gen);
}
spin_unlock(&inode->i_lock);
i_vmtruncate_work);
struct inode *inode = &ci->vfs_inode;
- dout(10, "vmtruncate_work %p\n", inode);
+ dout("vmtruncate_work %p\n", inode);
mutex_lock(&inode->i_mutex);
__ceph_do_pending_vmtruncate(inode);
mutex_unlock(&inode->i_mutex);
retry:
spin_lock(&inode->i_lock);
if (ci->i_truncate_pending == 0) {
- dout(10, "__do_pending_vmtruncate %p none pending\n", inode);
+ dout("__do_pending_vmtruncate %p none pending\n", inode);
spin_unlock(&inode->i_lock);
return;
}
* possibly truncate them.. so write AND block!
*/
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
- dout(10, "__do_pending_vmtruncate %p flushing snaps first\n",
+ dout("__do_pending_vmtruncate %p flushing snaps first\n",
inode);
spin_unlock(&inode->i_lock);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
to = ci->i_truncate_size;
wrbuffer_refs = ci->i_wrbuffer_ref;
- dout(10, "__do_pending_vmtruncate %p (%d) to %lld\n", inode,
+ dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
ci->i_truncate_pending, to);
spin_unlock(&inode->i_lock);
spin_lock(&inode->i_lock);
issued = __ceph_caps_issued(ci, NULL);
- dout(10, "setattr %p issued %s\n", inode, ceph_cap_string(issued));
+ dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
if (ia_valid & ATTR_UID) {
- dout(10, "setattr %p uid %d -> %d\n", inode,
+ dout("setattr %p uid %d -> %d\n", inode,
inode->i_uid, attr->ia_uid);
if (issued & CEPH_CAP_AUTH_EXCL) {
inode->i_uid = attr->ia_uid;
}
}
if (ia_valid & ATTR_GID) {
- dout(10, "setattr %p gid %d -> %d\n", inode,
+ dout("setattr %p gid %d -> %d\n", inode,
inode->i_gid, attr->ia_gid);
if (issued & CEPH_CAP_AUTH_EXCL) {
inode->i_gid = attr->ia_gid;
}
}
if (ia_valid & ATTR_MODE) {
- dout(10, "setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
+ dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
attr->ia_mode);
if (issued & CEPH_CAP_AUTH_EXCL) {
inode->i_mode = attr->ia_mode;
}
if (ia_valid & ATTR_ATIME) {
- dout(10, "setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
+ dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
}
}
if (ia_valid & ATTR_MTIME) {
- dout(10, "setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
+ dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
}
}
if (ia_valid & ATTR_SIZE) {
- dout(10, "setattr %p size %lld -> %lld\n", inode,
+ dout("setattr %p size %lld -> %lld\n", inode,
inode->i_size, attr->ia_size);
if (attr->ia_size > inode->i_sb->s_maxbytes) {
err = -EINVAL;
/* these do nothing */
if (ia_valid & ATTR_CTIME)
- dout(10, "setattr %p ctime %ld.%ld -> %ld.%ld\n", inode,
+ dout("setattr %p ctime %ld.%ld -> %ld.%ld\n", inode,
inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec);
if (ia_valid & ATTR_FILE)
- dout(10, "setattr %p ATTR_FILE ... hrm!\n", inode);
+ dout("setattr %p ATTR_FILE ... hrm!\n", inode);
if (dirtied) {
__ceph_mark_dirty_caps(ci, dirtied);
req->r_num_caps = 1;
err = ceph_mdsc_do_request(mdsc, parent_inode, req);
}
- dout(10, "setattr %p result=%d (%s locally, %d remote)\n", inode, err,
+ dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
ceph_cap_string(dirtied), mask);
ceph_mdsc_put_request(req);
int err;
if (ceph_snap(inode) == CEPH_SNAPDIR) {
- dout(30, "do_getattr inode %p SNAPDIR\n", inode);
+ dout("do_getattr inode %p SNAPDIR\n", inode);
return 0;
}
- dout(30, "do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
+ dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
return 0;
req->r_args.getattr.mask = cpu_to_le32(mask);
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
- dout(20, "do_getattr result=%d\n", err);
+ dout("do_getattr result=%d\n", err);
return err;
}
xattr->should_free_name = should_free_name;
ci->i_xattrs.count++;
- dout(30, "__set_xattr count=%d\n", ci->i_xattrs.count);
+ dout("__set_xattr count=%d\n", ci->i_xattrs.count);
} else {
kfree(*newxattr);
*newxattr = NULL;
if (new) {
rb_link_node(&xattr->node, parent, p);
rb_insert_color(&xattr->node, &ci->i_xattrs.xattrs);
- dout(30, "__set_xattr_val p=%p\n", p);
+ dout("__set_xattr_val p=%p\n", p);
}
- dout(20, "__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
+ dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
return 0;
else if (c > 0)
p = &(*p)->rb_right;
else {
- dout(20, "__get_xattr %s: found %.*s\n", name,
+ dout("__get_xattr %s: found %.*s\n", name,
xattr->val_len, xattr->val);
return xattr;
}
}
- dout(20, "__get_xattr %s: not found\n", name);
+ dout("__get_xattr %s: not found\n", name);
return NULL;
}
struct ceph_inode_xattr *xattr = NULL;
p = rb_first(&ci->i_xattrs.xattrs);
- dout(30, "__copy_xattr_names count=%d\n", ci->i_xattrs.count);
+ dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
memcpy(dest, xattr->name, xattr->name_len);
dest[xattr->name_len] = '\0';
- dout(30, "dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
+ dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
xattr->name_len, ci->i_xattrs.names_size);
dest += xattr->name_len + 1;
p = rb_first(&ci->i_xattrs.xattrs);
- dout(20, "__destroy_xattrs p=%p\n", p);
+ dout("__destroy_xattrs p=%p\n", p);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
tmp = p;
p = rb_next(tmp);
- dout(30, "__destroy_xattrs next p=%p (%.*s)\n", p,
+ dout("__destroy_xattrs next p=%p (%.*s)\n", p,
xattr->name_len, xattr->name);
rb_erase(tmp, &ci->i_xattrs.xattrs);
int err;
int i;
- dout(20, "__build_xattrs(): ci->i_xattrs.len=%d\n", ci->i_xattrs.len);
+ dout("__build_xattrs(): ci->i_xattrs.len=%d\n", ci->i_xattrs.len);
if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
return 0; /* already built */
int size = 4 + ci->i_xattrs.count*(4 + 4) +
ci->i_xattrs.names_size +
ci->i_xattrs.vals_size;
- dout(30, "__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
+ dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
ci->i_xattrs.count, ci->i_xattrs.names_size,
ci->i_xattrs.vals_size);
return (vir_xattr->getxattr_cb)(ci, value, size);
spin_lock(&inode->i_lock);
- dout(10, "getxattr %p ver=%lld index_ver=%lld\n", inode,
+ dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
int i;
spin_lock(&inode->i_lock);
- dout(10, "listxattr %p ver=%lld index_ver=%lld\n", inode,
+ dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
}
}
- dout(10, "setxattr value=%.*s\n", (int)size, value);
+ dout("setxattr value=%.*s\n", (int)size, value);
/* do request */
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
req->r_num_pages = nr_pages;
req->r_data_len = size;
- dout(30, "xattr.ver (before): %lld\n", ci->i_xattrs.version);
+ dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
err = ceph_mdsc_do_request(mdsc, parent_inode, req);
ceph_mdsc_put_request(req);
- dout(30, "xattr.ver (after): %lld\n", ci->i_xattrs.version);
+ dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
out:
if (pages) {
int prealloc_len = required_blob_size;
spin_unlock(&inode->i_lock);
- dout(30, " required_blob_size=%d\n", required_blob_size);
+ dout(" required_blob_size=%d\n", required_blob_size);
prealloc_blob = kmalloc(prealloc_len, GFP_NOFS);
if (!prealloc_blob)
goto out;
goto retry;
}
- dout(20, "setxattr %p issued %s\n", inode, ceph_cap_string(issued));
+ dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
err = __set_xattr(ci, newname, name_len, newval,
val_len, 1, 1, 1, &xattr);
__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
spin_lock(&inode->i_lock);
__build_xattrs(inode);
issued = __ceph_caps_issued(ci, NULL);
- dout(10, "removexattr %p issued %s\n", inode, ceph_cap_string(issued));
+ dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
if (!(issued & CEPH_CAP_XATTR_EXCL))
goto do_sync;
#include "super.h"
#include "ceph_debug.h"
-int ceph_debug_ioctl __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_IOCTL
-#define DOUT_VAR ceph_debug_ioctl
-
/*
* ioctls
long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- dout(10, "ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
+ dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
switch (cmd) {
case CEPH_IOC_GET_LAYOUT:
return ceph_ioctl_get_layout(file, (void __user *)arg);
#include "mon_client.h"
#include "ceph_debug.h"
-
-int ceph_debug_mdsc __read_mostly = -1;
-#define DOUT_VAR ceph_debug_mdsc
-#define DOUT_MASK DOUT_MASK_MDSC
#include "super.h"
#include "messenger.h"
#include "decode.h"
ceph_decode_need(p, end, info->dir_dname_len[i], bad);
info->dir_dname[i] = *p;
*p += info->dir_dname_len[i];
- dout(20, "parsed dir dname '%.*s'\n", info->dir_dname_len[i],
+ dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
info->dir_dname[i]);
info->dir_dlease[i] = *p;
*p += sizeof(struct ceph_mds_reply_lease);
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
- dout(30, "get_session %p %d -> %d\n", s,
+ dout("get_session %p %d -> %d\n", s,
atomic_read(&s->s_ref), atomic_read(&s->s_ref)+1);
atomic_inc(&s->s_ref);
return s;
void ceph_put_mds_session(struct ceph_mds_session *s)
{
- dout(30, "put_session %p %d -> %d\n", s,
+ dout("put_session %p %d -> %d\n", s,
atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
if (atomic_dec_and_test(&s->s_ref))
kfree(s);
if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
return NULL;
session = mdsc->sessions[mds];
- dout(30, "lookup_mds_session %p %d -> %d\n", session,
+ dout("lookup_mds_session %p %d -> %d\n", session,
atomic_read(&session->s_ref), atomic_read(&session->s_ref)+1);
get_session(session);
return session;
INIT_LIST_HEAD(&s->s_cap_flushing);
s->s_cap_flush_tid = 0;
- dout(10, "register_session mds%d\n", mds);
+ dout("register_session mds%d\n", mds);
if (mds >= mdsc->max_sessions) {
int newmax = 1 << get_count_order(mds+1);
struct ceph_mds_session **sa;
- dout(50, "register_session realloc to %d\n", newmax);
+ dout("register_session realloc to %d\n", newmax);
sa = kzalloc(newmax * sizeof(void *), GFP_NOFS);
if (sa == NULL)
return ERR_PTR(-ENOMEM);
*/
static void unregister_session(struct ceph_mds_client *mdsc, int mds)
{
- dout(10, "unregister_session mds%d %p\n", mds, mdsc->sessions[mds]);
+ dout("unregister_session mds%d %p\n", mds, mdsc->sessions[mds]);
ceph_put_mds_session(mdsc->sessions[mds]);
mdsc->sessions[mds] = NULL;
}
void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
- dout(10, "put_request %p %d -> %d\n", req,
+ dout("put_request %p %d -> %d\n", req,
atomic_read(&req->r_ref), atomic_read(&req->r_ref)-1);
if (atomic_dec_and_test(&req->r_ref)) {
if (req->r_request)
req->r_tid = ++mdsc->last_tid;
if (req->r_num_caps)
ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
- dout(30, "__register_request %p tid %lld\n", req, req->r_tid);
+ dout("__register_request %p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);
static void __unregister_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
- dout(30, "__unregister_request %p tid %lld\n", req, req->r_tid);
+ dout("__unregister_request %p tid %lld\n", req, req->r_tid);
radix_tree_delete(&mdsc->request_tree, req->r_tid);
ceph_mdsc_put_request(req);
if (req->r_resend_mds >= 0 &&
(__have_session(mdsc, req->r_resend_mds) ||
ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
- dout(20, "choose_mds using resend_mds mds%d\n",
+ dout("choose_mds using resend_mds mds%d\n",
req->r_resend_mds);
return req->r_resend_mds;
}
get_random_bytes(&r, 1);
r %= frag.ndist;
mds = frag.dist[r];
- dout(20, "choose_mds %p %llx.%llx "
+ dout("choose_mds %p %llx.%llx "
"frag %u mds%d (%d/%d)\n",
dentry->d_inode,
ceph_vinop(&ci->vfs_inode),
if (frag.mds >= 0) {
/* choose auth mds */
mds = frag.mds;
- dout(20, "choose_mds %p %llx.%llx "
+ dout("choose_mds %p %llx.%llx "
"frag %u mds%d (auth)\n",
dentry->d_inode,
ceph_vinop(&ci->vfs_inode),
/* ok, just pick one at random */
random:
mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
- dout(20, "choose_mds chose random mds%d\n", mds);
+ dout("choose_mds chose random mds%d\n", mds);
return mds;
}
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
- dout(10, "open_session to mds%d (%s)\n", mds,
+ dout("open_session to mds%d (%s)\n", mds,
ceph_mds_state_name(mstate));
session->s_state = CEPH_MDS_SESSION_OPENING;
session->s_renew_requested = jiffies;
struct inode *inode;
int ret;
- dout(10, "iterate_session_caps %p mds%d\n", session, session->s_mds);
+ dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
spin_lock(&session->s_cap_lock);
list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
inode = igrab(&cap->ci->vfs_inode);
void *arg)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- dout(10, "removing cap %p, ci is %p, inode is %p\n",
+ dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
ceph_remove_cap(cap);
return 0;
*/
static void remove_session_caps(struct ceph_mds_session *session)
{
- dout(10, "remove_session_caps on %p\n", session);
+ dout("remove_session_caps on %p\n", session);
iterate_session_caps(session, remove_session_caps_cb, NULL);
BUG_ON(session->s_nr_caps > 0);
static void wake_up_session_caps(struct ceph_mds_session *session)
{
- dout(10, "wake_up_session_caps %p mds%d\n", session, session->s_mds);
+ dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
iterate_session_caps(session, wake_up_session_cb, session);
}
if (time_after_eq(jiffies, session->s_cap_ttl) &&
time_after_eq(session->s_cap_ttl, session->s_renew_requested))
- dout(1, "mds%d session caps stale\n", session->s_mds);
+ pr_info("ceph mds%d session caps stale\n", session->s_mds);
/* do not try to renew caps until a recovering mds has reconnected
* with its clients. */
state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
if (state < CEPH_MDS_STATE_RECONNECT) {
- dout(10, "send_renew_caps ignoring mds%d (%s)\n",
+ dout("send_renew_caps ignoring mds%d (%s)\n",
session->s_mds, ceph_mds_state_name(state));
return 0;
}
- dout(10, "send_renew_caps to mds%d (%s)\n", session->s_mds,
+ dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
ceph_mds_state_name(state));
session->s_renew_requested = jiffies;
msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, 0);
if (was_stale) {
if (time_before(jiffies, session->s_cap_ttl)) {
- dout(1, "mds%d caps renewed\n", session->s_mds);
+ pr_info("ceph mds%d caps renewed\n", session->s_mds);
wake = 1;
} else {
- dout(1, "mds%d caps still stale\n", session->s_mds);
+ pr_info("ceph mds%d caps still stale\n",session->s_mds);
}
}
- dout(10, "renewed_caps mds%d ttl now %lu, was %s, now %s\n",
+ dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
spin_unlock(&session->s_cap_lock);
struct ceph_msg *msg;
int err = 0;
- dout(10, "request_close_session mds%d state %s seq %lld\n",
+ dout("request_close_session mds%d state %s seq %lld\n",
session->s_mds, session_state_name(session->s_state),
session->s_seq);
msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
used = __ceph_caps_used(ci);
oissued = __ceph_caps_issued_other(ci, cap);
- dout(20, "trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
+ dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
ceph_cap_string(used));
if (ci->i_dirty_caps)
/* try to drop referring dentries */
spin_unlock(&inode->i_lock);
d_prune_aliases(inode);
- dout(20, "trim_caps_cb %p cap %p pruned, count now %d\n",
+ dout("trim_caps_cb %p cap %p pruned, count now %d\n",
inode, cap, atomic_read(&inode->i_count));
return 0;
}
{
int trim_caps = session->s_nr_caps - max_caps;
- dout(10, "trim_caps mds%d start: %d / %d, trim %d\n",
+ dout("trim_caps mds%d start: %d / %d, trim %d\n",
session->s_mds, session->s_nr_caps, max_caps, trim_caps);
if (trim_caps > 0) {
session->s_trim_caps = trim_caps;
iterate_session_caps(session, trim_caps_cb, session);
- dout(10, "trim_caps mds%d done: %d / %d, trimmed %d\n",
+ dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
session->s_mds, session->s_nr_caps, max_caps,
trim_caps - session->s_trim_caps);
}
0, 0, NULL);
if (!msg)
goto out_unlocked;
- dout(10, "add_cap_releases %p msg %p now %d\n", session, msg,
+ dout("add_cap_releases %p msg %p now %d\n", session, msg,
(int)msg->front.iov_len);
head = msg->front.iov_base;
head->num = cpu_to_le32(0);
list_head);
head = msg->front.iov_base;
if (head->num) {
- dout(10, " queueing non-full %p (%d)\n", msg,
+ dout(" queueing non-full %p (%d)\n", msg,
le32_to_cpu(head->num));
list_move_tail(&msg->list_head,
&session->s_cap_releases_done);
{
struct ceph_msg *msg;
- dout(10, "send_cap_releases mds%d\n", session->s_mds);
+ dout("send_cap_releases mds%d\n", session->s_mds);
while (1) {
spin_lock(&session->s_cap_lock);
if (list_empty(&session->s_cap_releases_done))
list_del_init(&msg->list_head);
spin_unlock(&session->s_cap_lock);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- dout(10, "send_cap_releases mds%d %p\n", session->s_mds, msg);
+ dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
ceph_send_msg_mds(mdsc, msg, session->s_mds);
}
spin_unlock(&session->s_cap_lock);
struct inode *inode = temp->d_inode;
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
- dout(50, "build_path_dentry path+%d: %p SNAPDIR\n",
+ dout("build_path_dentry path+%d: %p SNAPDIR\n",
pos, temp);
} else if (stop_on_nosnap && inode &&
ceph_snap(inode) == CEPH_NOSNAP) {
break;
strncpy(path + pos, temp->d_name.name,
temp->d_name.len);
- dout(50, "build_path_dentry path+%d: %p '%.*s'\n",
+ dout("build_path_dentry path+%d: %p '%.*s'\n",
pos, temp, temp->d_name.len, path + pos);
}
if (pos)
*base = ceph_ino(temp->d_inode);
*plen = len;
- dout(10, "build_path_dentry on %p %d built %llx '%.*s'\n",
+ dout("build_path_dentry on %p %d built %llx '%.*s'\n",
dentry, atomic_read(&dentry->d_count), *base, len, path);
return path;
}
if (rinode) {
r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
- dout(10, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+ dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
ceph_snap(rinode));
} else if (rdentry) {
r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
- dout(10, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
+ dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
*ppath);
} else if (rpath) {
*ino = rino;
*ppath = rpath;
*pathlen = strlen(rpath);
- dout(10, " path %.*s\n", *pathlen, rpath);
+ dout(" path %.*s\n", *pathlen, rpath);
}
return r;
int flags = 0;
req->r_attempts++;
- dout(10, "prepare_send_request %p tid %lld %s (attempt %d)\n", req,
+ dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
if (req->r_request) {
rhead->num_fwd = req->r_num_fwd;
rhead->num_retry = req->r_attempts - 1;
- dout(20, " r_locked_dir = %p\n", req->r_locked_dir);
+ dout(" r_locked_dir = %p\n", req->r_locked_dir);
if (req->r_target_inode && req->r_got_unsafe)
rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
if (req->r_timeout &&
time_after_eq(jiffies, req->r_started + req->r_timeout)) {
- dout(10, "do_request timed out\n");
+ dout("do_request timed out\n");
err = -EIO;
goto finish;
}
mds = __choose_mds(mdsc, req);
if (mds < 0 ||
ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
- dout(30, "do_request no mds or not active, waiting for map\n");
+ dout("do_request no mds or not active, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
ceph_monc_request_mdsmap(&mdsc->client->monc,
mdsc->mdsmap->m_epoch+1);
session = __ceph_lookup_mds_session(mdsc, mds);
if (!session)
session = register_session(mdsc, mds);
- dout(30, "do_request mds%d session %p state %s\n", mds, session,
+ dout("do_request mds%d session %p state %s\n", mds, session,
session_state_name(session->s_state));
if (session->s_state != CEPH_MDS_SESSION_OPEN) {
if (session->s_state == CEPH_MDS_SESSION_NEW ||
u64 nexttid = 0;
int i, got;
- dout(20, "kick_requests mds%d\n", mds);
+ dout("kick_requests mds%d\n", mds);
while (nexttid <= mdsc->last_tid) {
got = radix_tree_gang_lookup(&mdsc->request_tree,
(void **)&reqs, nexttid, 10);
reqs[i]->r_session->s_mds == mds) ||
(all && reqs[i]->r_fwd_session &&
reqs[i]->r_fwd_session->s_mds == mds))) {
- dout(10, " kicking tid %llu\n", reqs[i]->r_tid);
+ dout(" kicking tid %llu\n", reqs[i]->r_tid);
put_request_sessions(reqs[i]);
__do_request(mdsc, reqs[i]);
}
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
- dout(30, "submit_request on %p\n", req);
+ dout("submit_request on %p\n", req);
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, NULL);
__do_request(mdsc, req);
{
int err;
- dout(30, "do_request on %p\n", req);
+ dout("do_request on %p\n", req);
/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
if (req->r_inode)
}
mutex_unlock(&mdsc->mutex);
- dout(30, "do_request %p done, result %d\n", req, err);
+ dout("do_request %p done, result %d\n", req, err);
return err;
}
mutex_lock(&mdsc->mutex);
req = __lookup_request(mdsc, tid);
if (!req) {
- dout(1, "handle_reply on unknown tid %llu\n", tid);
+ dout("handle_reply on unknown tid %llu\n", tid);
mutex_unlock(&mdsc->mutex);
return;
}
- dout(10, "handle_reply %p\n", req);
+ dout("handle_reply %p\n", req);
mds = le32_to_cpu(msg->hdr.src.name.num);
/* dup? */
* response. And even if it did, there is nothing
* useful we could do with a revised return value.
*/
- dout(10, "got safe reply %llu, mds%d\n", tid, mds);
+ dout("got safe reply %llu, mds%d\n", tid, mds);
BUG_ON(req->r_session == NULL);
list_del_init(&req->r_unsafe_item);
goto out_err;
}
result = le32_to_cpu(rinfo->head->result);
- dout(10, "handle_reply tid %lld result %d\n", tid, result);
+ dout("handle_reply tid %lld result %d\n", tid, result);
/*
* Tolerate 2 consecutive ESTALEs from the same mds.
mutex_lock(&mdsc->mutex);
req = __lookup_request(mdsc, tid);
if (!req) {
- dout(10, "forward %llu dne\n", tid);
+ dout("forward %llu dne\n", tid);
goto out; /* dup reply? */
}
if (fwd_seq <= req->r_num_fwd) {
- dout(10, "forward %llu to mds%d - old seq %d <= %d\n",
+ dout("forward %llu to mds%d - old seq %d <= %d\n",
tid, next_mds, req->r_num_fwd, fwd_seq);
} else if (!must_resend &&
__have_session(mdsc, next_mds) &&
mdsc->sessions[next_mds]->s_state == CEPH_MDS_SESSION_OPEN) {
/* yes. adjust our sessions, but that's all; the old mds
* forwarded our message for us. */
- dout(10, "forward %llu to mds%d (mds%d fwded)\n", tid, next_mds,
+ dout("forward %llu to mds%d (mds%d fwded)\n", tid, next_mds,
from_mds);
req->r_num_fwd = fwd_seq;
put_request_sessions(req);
} else {
/* no, resend. */
/* forward race not possible; mds would drop */
- dout(10, "forward %llu to mds%d (we resend)\n", tid, next_mds);
+ dout("forward %llu to mds%d (we resend)\n", tid, next_mds);
req->r_num_fwd = fwd_seq;
req->r_resend_mds = next_mds;
put_request_sessions(req);
if (!session) {
if (op != CEPH_SESSION_OPEN) {
- dout(10, "handle_session no session for mds%d\n", mds);
+ dout("handle_session no session for mds%d\n", mds);
return;
}
- dout(10, "handle_session creating session for mds%d\n", mds);
+ dout("handle_session creating session for mds%d\n", mds);
session = register_session(mdsc, mds);
}
mutex_lock(&session->s_mutex);
- dout(2, "handle_session mds%d %s %p state %s seq %llu\n",
+ dout("handle_session mds%d %s %p state %s seq %llu\n",
mds, ceph_session_op_name(op), session,
session_state_name(session->s_state), seq);
switch (op) {
break;
case CEPH_SESSION_STALE:
- dout(1, "mds%d caps went stale, renewing\n", session->s_mds);
+ pr_info("ceph mds%d caps went stale, renewing\n",
+ session->s_mds);
spin_lock(&session->s_cap_lock);
session->s_cap_gen++;
session->s_cap_ttl = 0;
struct ceph_mds_request *req, *nreq;
int err;
- dout(10, "replay_unsafe_requests mds%d\n", session->s_mds);
+ dout("replay_unsafe_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
ci = cap->ci;
- dout(10, " adding %p ino %llx.%llx cap %p %lld %s\n",
+ dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
inode, ceph_vinop(inode), cap, cap->cap_id,
ceph_cap_string(cap->issued));
ceph_decode_need(&p, end, sizeof(u64), needmore);
__le32 *pnum_caps, *pnum_realms;
struct encode_caps_data iter_args;
- dout(1, "reconnect to recovering mds%d\n", mds);
+ pr_info("ceph reconnect to recovering mds%d\n", mds);
/* find session */
session = __ceph_lookup_mds_session(mdsc, mds);
len += session->s_nr_caps *
sizeof(struct ceph_mds_cap_reconnect);
len += session->s_nr_caps * (100); /* guess! */
- dout(40, "estimating i need %d bytes for %d caps\n",
+ dout("estimating i need %d bytes for %d caps\n",
len, session->s_nr_caps);
} else {
- dout(20, "no session for mds%d, will send short reconnect\n",
+ dout("no session for mds%d, will send short reconnect\n",
mds);
}
ceph_encode_32(&p, 0);
goto send;
}
- dout(10, "session %p state %s\n", session,
+ dout("session %p state %s\n", session,
session_state_name(session->s_state));
/* traverse this session's caps */
if (!got)
break;
- dout(10, " adding snap realm %llx seq %lld parent %llx\n",
+ dout(" adding snap realm %llx seq %lld parent %llx\n",
realm->ino, realm->seq, realm->parent_ino);
ceph_decode_need(&p, end, sizeof(*sr_rec), needmore);
sr_rec = p;
send:
reply->front.iov_len = p - reply->front.iov_base;
reply->hdr.front_len = cpu_to_le32(reply->front.iov_len);
- dout(10, "final len was %u (guessed %d)\n",
+ dout("final len was %u (guessed %d)\n",
(unsigned)reply->front.iov_len, len);
ceph_send_msg_mds(mdsc, reply, mds);
*/
num_caps += num_realms;
newlen = (len * (session->s_nr_caps+3)) / (num_caps + 1);
- dout(30, "i guessed %d, and did %d of %d caps, retrying with %d\n",
+ dout("i guessed %d, and did %d of %d caps, retrying with %d\n",
len, num_caps, session->s_nr_caps, newlen);
len = newlen;
ceph_msg_put(reply);
int oldstate, newstate;
struct ceph_mds_session *s;
- dout(20, "check_new_map new %u old %u\n",
+ dout("check_new_map new %u old %u\n",
newmap->m_epoch, oldmap->m_epoch);
for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
oldstate = ceph_mdsmap_get_state(oldmap, i);
newstate = ceph_mdsmap_get_state(newmap, i);
- dout(20, "check_new_map mds%d state %s -> %s (session %s)\n",
+ dout("check_new_map mds%d state %s -> %s (session %s)\n",
i, ceph_mds_state_name(oldstate),
ceph_mds_state_name(newstate),
session_state_name(s->s_state));
if (le32_to_cpu(msg->hdr.src.name.type) != CEPH_ENTITY_TYPE_MDS)
return;
mds = le32_to_cpu(msg->hdr.src.name.num);
- dout(10, "handle_lease from mds%d\n", mds);
+ dout("handle_lease from mds%d\n", mds);
/* decode */
if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
/* lookup inode */
inode = ceph_find_inode(sb, vino);
- dout(20, "handle_lease '%s', mask %d, ino %llx %p\n",
+ dout("handle_lease '%s', mask %d, ino %llx %p\n",
ceph_lease_op_name(h->action), mask, vino.ino, inode);
if (inode == NULL) {
- dout(10, "handle_lease no inode %llx\n", vino.ino);
+ dout("handle_lease no inode %llx\n", vino.ino);
goto release;
}
ci = ceph_inode(inode);
/* dentry */
parent = d_find_alias(inode);
if (!parent) {
- dout(10, "no parent dentry on inode %p\n", inode);
+ dout("no parent dentry on inode %p\n", inode);
WARN_ON(1);
goto release; /* hrm... */
}
int len = sizeof(*lease) + sizeof(u32);
int dnamelen = 0;
- dout(30, "lease_send_msg inode %p dentry %p %s to mds%d\n",
+ dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
inode, dentry, ceph_lease_op_name(action), mds);
dnamelen = dentry->d_name.len;
len += dnamelen;
di->lease_session->s_mds < 0 ||
di->lease_gen != di->lease_session->s_cap_gen ||
!time_before(jiffies, dentry->d_time)) {
- dout(10, "lease_release inode %p dentry %p -- "
+ dout("lease_release inode %p dentry %p -- "
"no lease on %d\n",
inode, dentry, mask);
spin_unlock(&dentry->d_lock);
__ceph_mdsc_drop_dentry_lease(dentry);
spin_unlock(&dentry->d_lock);
- dout(10, "lease_release inode %p dentry %p mask %d to mds%d\n",
+ dout("lease_release inode %p dentry %p mask %d to mds%d\n",
inode, dentry, mask, mds);
ceph_mdsc_lease_send_msg(mdsc, mds, inode, dentry,
CEPH_MDS_LEASE_RELEASE, seq);
int renew_caps;
u32 want_map = 0;
- dout(30, "delayed_work\n");
+ dout("delayed_work\n");
ceph_check_delayed_caps(mdsc, 0);
mutex_lock(&mdsc->mutex);
if (s == NULL)
continue;
if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
- dout(10, "resending session close request for mds%d\n",
+ dout("resending session close request for mds%d\n",
s->s_mds);
request_close_session(mdsc, s);
ceph_put_mds_session(s);
{
int i;
- dout(10, "drop_leases\n");
+ dout("drop_leases\n");
mutex_lock(&mdsc->mutex);
for (i = 0; i < mdsc->max_sessions; i++) {
struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
mutex_lock(&mdsc->mutex);
if (__get_oldest_tid(mdsc)) {
mutex_unlock(&mdsc->mutex);
- dout(10, "wait_requests waiting for requests\n");
+ dout("wait_requests waiting for requests\n");
wait_for_completion_timeout(&mdsc->safe_umount_waiters,
client->mount_args.mount_timeout * HZ);
mutex_lock(&mdsc->mutex);
/* tear down remaining requests */
while (radix_tree_gang_lookup(&mdsc->request_tree,
(void **)&req, 0, 1)) {
- dout(10, "wait_requests timed out on tid %llu\n",
+ dout("wait_requests timed out on tid %llu\n",
req->r_tid);
radix_tree_delete(&mdsc->request_tree, req->r_tid);
ceph_mdsc_put_request(req);
}
}
mutex_unlock(&mdsc->mutex);
- dout(10, "wait_requests done\n");
+ dout("wait_requests done\n");
}
/*
*/
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
- dout(10, "pre_umount\n");
+ dout("pre_umount\n");
mdsc->stopping = 1;
drop_leases(mdsc);
{
int mds, ret = 1;
- dout(10, "check_cap_flush want %lld\n", want_flush_seq);
+ dout("check_cap_flush want %lld\n", want_flush_seq);
mutex_lock(&mdsc->mutex);
for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
struct ceph_mds_session *session = mdsc->sessions[mds];
spin_lock(&inode->i_lock);
if (ci->i_cap_flush_seq <= want_flush_seq) {
- dout(10, "check_cap_flush still flushing %p "
+ dout("check_cap_flush still flushing %p "
"seq %lld <= %lld to mds%d\n", inode,
ci->i_cap_flush_seq, want_flush_seq,
session->s_mds);
}
mutex_unlock(&mdsc->mutex);
- dout(10, "check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
+ dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
return ret;
}
int got;
mutex_lock(&mdsc->mutex);
- dout(10, "wait_unsafe_requests want %lld\n", want_tid);
+ dout("wait_unsafe_requests want %lld\n", want_tid);
while (1) {
got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
next_tid, 1);
ceph_mdsc_get_request(req);
mutex_unlock(&mdsc->mutex);
- dout(10, "wait_unsafe_requests wait on %llu (want %llu)\n",
+ dout("wait_unsafe_requests wait on %llu (want %llu)\n",
req->r_tid, want_tid);
wait_for_completion(&req->r_safe_completion);
mutex_lock(&mdsc->mutex);
ceph_mdsc_put_request(req);
}
mutex_unlock(&mdsc->mutex);
- dout(10, "wait_unsafe_requests done\n");
+ dout("wait_unsafe_requests done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
u64 want_tid, want_flush;
- dout(10, "sync\n");
+ dout("sync\n");
mutex_lock(&mdsc->mutex);
want_tid = mdsc->last_tid;
want_flush = mdsc->cap_flush_seq;
mutex_unlock(&mdsc->mutex);
- dout(10, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
+ dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
ceph_check_delayed_caps(mdsc, 1);
struct ceph_client *client = mdsc->client;
unsigned long started, timeout = client->mount_args.mount_timeout * HZ;
- dout(10, "close_sessions\n");
+ dout("close_sessions\n");
mutex_lock(&mdsc->mutex);
/* close sessions */
started = jiffies;
while (time_before(jiffies, started + timeout)) {
- dout(10, "closing sessions\n");
+ dout("closing sessions\n");
n = 0;
for (i = 0; i < mdsc->max_sessions; i++) {
session = __ceph_lookup_mds_session(mdsc, i);
if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
break;
- dout(10, "waiting for sessions to close\n");
+ dout("waiting for sessions to close\n");
mutex_unlock(&mdsc->mutex);
wait_for_completion_timeout(&mdsc->session_close_waiters,
timeout);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
- dout(10, "stopped\n");
+ dout("stopped\n");
}
void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
- dout(10, "stop\n");
+ dout("stop\n");
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
if (mdsc->mdsmap)
ceph_mdsmap_destroy(mdsc->mdsmap);
}
ceph_decode_32(&p, epoch);
ceph_decode_32(&p, maplen);
- dout(2, "handle_map epoch %u len %d\n", epoch, (int)maplen);
+ dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
/* do we need it? */
ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
mutex_lock(&mdsc->mutex);
if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
- dout(2, "handle_map epoch %u <= our %u\n",
+ dout("handle_map epoch %u <= our %u\n",
epoch, mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
return;
#include "decode.h"
#include "ceph_debug.h"
-
-int ceph_debug_mdsmap __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_MDSMAP
-#define DOUT_VAR ceph_debug_mdsmap
#include "super.h"
ceph_decode_64(p, state_seq);
ceph_decode_copy(p, &addr, sizeof(addr));
*p += sizeof(struct ceph_timespec) + 2*sizeof(u32);
- dout(10, "mdsmap_decode %d/%d mds%d.%d %u.%u.%u.%u:%u %s\n",
+ dout("mdsmap_decode %d/%d mds%d.%d %u.%u.%u.%u:%u %s\n",
i+1, n, mds, inc, IPQUADPORT(addr.ipaddr),
ceph_mds_state_name(state));
if (mds >= 0 && mds < m->m_max_mds && state > 0) {
ceph_decode_32(p, m->m_cas_pg_pool);
/* ok, we don't care about the rest. */
- dout(30, "mdsmap_decode success epoch %u\n", m->m_epoch);
+ dout("mdsmap_decode success epoch %u\n", m->m_epoch);
return m;
badmem:
#include <net/tcp.h>
#include "ceph_debug.h"
-int ceph_debug_msgr __read_mostly;
-#define DOUT_MASK DOUT_MASK_MSGR
-#define DOUT_VAR ceph_debug_msgr
-
#include "super.h"
#include "messenger.h"
{
struct ceph_messenger *msgr = (struct ceph_messenger *)sk->sk_user_data;
- dout(30, "ceph_accept_ready messenger %p sk_state = %u\n",
+ dout("ceph_accept_ready messenger %p sk_state = %u\n",
msgr, sk->sk_state);
if (sk->sk_state == TCP_LISTEN)
queue_work(ceph_msgr_wq, &msgr->awork);
struct ceph_connection *con =
(struct ceph_connection *)sk->sk_user_data;
if (sk->sk_state != TCP_CLOSE_WAIT) {
- dout(30, "ceph_data_ready on %p state = %lu, queueing work\n",
+ dout("ceph_data_ready on %p state = %lu, queueing work\n",
con, con->state);
ceph_queue_con(con);
}
/* only queue to workqueue if there is data we want to write. */
if (test_bit(WRITE_PENDING, &con->state)) {
- dout(30, "ceph_write_space %p queueing write work\n", con);
+ dout("ceph_write_space %p queueing write work\n", con);
ceph_queue_con(con);
} else {
- dout(30, "ceph_write_space %p nothing to write\n", con);
+ dout("ceph_write_space %p nothing to write\n", con);
}
/* since we have our own write_space, clear the SOCK_NOSPACE flag */
struct ceph_connection *con =
(struct ceph_connection *)sk->sk_user_data;
- dout(30, "ceph_state_change %p state = %lu sk_state = %u\n",
+ dout("ceph_state_change %p state = %lu sk_state = %u\n",
con, con->state, sk->sk_state);
if (test_bit(CLOSED, &con->state))
switch (sk->sk_state) {
case TCP_CLOSE:
- dout(30, "ceph_state_change TCP_CLOSE\n");
+ dout("ceph_state_change TCP_CLOSE\n");
case TCP_CLOSE_WAIT:
- dout(30, "ceph_state_change TCP_CLOSE_WAIT\n");
+ dout("ceph_state_change TCP_CLOSE_WAIT\n");
set_bit(SOCK_CLOSED, &con->state);
if (test_bit(CONNECTING, &con->state))
con->error_msg = "connection failed";
ceph_queue_con(con);
break;
case TCP_ESTABLISHED:
- dout(30, "ceph_state_change TCP_ESTABLISHED\n");
+ dout("ceph_state_change TCP_ESTABLISHED\n");
ceph_queue_con(con);
break;
}
set_sock_callbacks(sock, con);
- dout(20, "connect %u.%u.%u.%u:%u\n",
+ dout("connect %u.%u.%u.%u:%u\n",
IPQUADPORT(*(struct sockaddr_in *)paddr));
ret = sock->ops->connect(sock, paddr,
sizeof(struct sockaddr_in), O_NONBLOCK);
if (ret == -EINPROGRESS) {
- dout(20, "connect %u.%u.%u.%u:%u EINPROGRESS sk_state = %u\n",
+ dout("connect %u.%u.%u.%u:%u EINPROGRESS sk_state = %u\n",
IPQUADPORT(*(struct sockaddr_in *)paddr),
sock->sk->sk_state);
ret = 0;
INIT_LIST_HEAD(&con->out_sent);
INIT_DELAYED_WORK(&con->work, con_work);
- dout(20, "new connection: %p\n", con);
+ dout("new connection: %p\n", con);
return con;
}
yes:
atomic_inc(&con->nref);
- dout(20, "get_connection %p nref = %d -> %d\n", con,
+ dout("get_connection %p nref = %d -> %d\n", con,
atomic_read(&con->nref) - 1, atomic_read(&con->nref));
return con;
}
{
int rc;
- dout(10, "con_close_socket on %p sock %p\n", con, con->sock);
+ dout("con_close_socket on %p sock %p\n", con, con->sock);
if (!con->sock)
return 0;
rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
*/
static void put_connection(struct ceph_connection *con)
{
- dout(20, "put_connection %p nref = %d -> %d\n", con,
+ dout("put_connection %p nref = %d -> %d\n", con,
atomic_read(&con->nref), atomic_read(&con->nref) - 1);
BUG_ON(atomic_read(&con->nref) == 0);
if (atomic_dec_and_test(&con->nref)) {
- dout(20, "put_connection %p destroying\n", con);
+ dout("put_connection %p destroying\n", con);
ceph_msg_put_list(&con->out_queue);
ceph_msg_put_list(&con->out_sent);
set_bit(CLOSED, &con->state);
unsigned long key = hash_addr(&con->peer_addr);
int rc = 0;
- dout(20, "register_connection %p %d -> %d\n", con,
+ dout("register_connection %p %d -> %d\n", con,
atomic_read(&con->nref), atomic_read(&con->nref) + 1);
atomic_inc(&con->nref);
head = radix_tree_lookup(&msgr->con_tree, key);
if (head) {
- dout(20, "register_connection %p in old bucket %lu head %p\n",
+ dout("register_connection %p in old bucket %lu head %p\n",
con, key, head);
list_add(&con->list_bucket, head);
} else {
- dout(20, "register_connection %p in new bucket %lu head %p\n",
+ dout("register_connection %p in new bucket %lu head %p\n",
con, key, &con->list_bucket);
INIT_LIST_HEAD(&con->list_bucket); /* empty */
rc = radix_tree_insert(&msgr->con_tree, key, &con->list_bucket);
static void add_connection_accepting(struct ceph_messenger *msgr,
struct ceph_connection *con)
{
- dout(20, "add_connection_accepting %p nref = %d -> %d\n", con,
+ dout("add_connection_accepting %p nref = %d -> %d\n", con,
atomic_read(&con->nref), atomic_read(&con->nref) + 1);
atomic_inc(&con->nref);
spin_lock(&msgr->con_lock);
unsigned long key;
void **slot, *val;
- dout(20, "__remove_connection %p\n", con);
+ dout("__remove_connection %p\n", con);
if (list_empty(&con->list_all)) {
- dout(20, "__remove_connection %p not registered\n", con);
+ dout("__remove_connection %p not registered\n", con);
return;
}
list_del_init(&con->list_all);
key = hash_addr(&con->peer_addr);
if (list_empty(&con->list_bucket)) {
/* last one in this bucket */
- dout(20, "__remove_connection %p and bucket %lu\n",
+ dout("__remove_connection %p and bucket %lu\n",
con, key);
radix_tree_delete(&msgr->con_tree, key);
} else {
* to us, adjust it to point to the next guy. */
slot = radix_tree_lookup_slot(&msgr->con_tree, key);
val = radix_tree_deref_slot(slot);
- dout(20, "__remove_connection %p from bucket %lu "
+ dout("__remove_connection %p from bucket %lu "
"head %p\n", con, key, val);
if (val == &con->list_bucket) {
- dout(20, "__remove_connection adjusting bucket"
+ dout("__remove_connection adjusting bucket"
" for %lu to next item, %p\n", key,
con->list_bucket.next);
radix_tree_replace_slot(slot,
unsigned long key = hash_addr(&new->peer_addr);
void **slot;
- dout(10, "replace_connection %p with %p\n", old, new);
+ dout("replace_connection %p with %p\n", old, new);
/* replace in con_tree */
slot = radix_tree_lookup_slot(&msgr->con_tree, key);
{
struct ceph_msg *m = con->out_msg;
- dout(10, "prepare_write_message_footer %p\n", con);
+ dout("prepare_write_message_footer %p\n", con);
con->out_kvec[v].iov_base = &m->footer;
con->out_kvec[v].iov_len = sizeof(m->footer);
con->out_kvec_bytes += sizeof(m->footer);
list_move_tail(&m->list_head, &con->out_sent);
con->out_msg = m; /* we don't bother taking a reference here. */
- dout(20, "prepare_write_message %p seq %lld type %d len %d+%d %d pgs\n",
+ dout("prepare_write_message %p seq %lld type %d len %d+%d %d pgs\n",
m, le64_to_cpu(m->hdr.seq), le16_to_cpu(m->hdr.type),
le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.data_len),
m->nr_pages);
*/
static void prepare_write_ack(struct ceph_connection *con)
{
- dout(20, "prepare_write_ack %p %u -> %u\n", con,
+ dout("prepare_write_ack %p %u -> %u\n", con,
con->in_seq_acked, con->in_seq);
con->in_seq_acked = con->in_seq;
{
int len = strlen(CEPH_BANNER);
- dout(10, "prepare_write_connect %p\n", con);
+ dout("prepare_write_connect %p\n", con);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq =
static void prepare_write_connect_retry(struct ceph_messenger *msgr,
struct ceph_connection *con)
{
- dout(10, "prepare_write_connect_retry %p\n", con);
+ dout("prepare_write_connect_retry %p\n", con);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq =
cpu_to_le32(get_global_seq(con->msgr, 0));
{
int len = strlen(CEPH_BANNER);
- dout(10, "prepare_write_accept_hello %p\n", con);
+ dout("prepare_write_accept_hello %p\n", con);
con->out_kvec[0].iov_base = CEPH_BANNER;
con->out_kvec[0].iov_len = len;
con->out_kvec[1].iov_base = &msgr->inst.addr;
*/
static void prepare_write_accept_reply(struct ceph_connection *con, bool retry)
{
- dout(10, "prepare_write_accept_reply %p\n", con);
+ dout("prepare_write_accept_reply %p\n", con);
con->out_reply.flags = 0;
if (test_bit(LOSSYTX, &con->state))
con->out_reply.flags = CEPH_MSG_CONNECT_LOSSY;
{
int ret;
- dout(10, "write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
+ dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
while (con->out_kvec_bytes > 0) {
ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
con->out_kvec_left, con->out_kvec_bytes,
con->out_kvec_left = 0;
ret = 1;
out:
- dout(30, "write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
+ dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
con->out_kvec_bytes, con->out_kvec_left, ret);
return ret; /* done! */
}
int crc = !ceph_test_opt(client, NOCRC);
int ret;
- dout(30, "write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
+ dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
con->out_msg_pos.page_pos);
}
}
- dout(30, "write_partial_msg_pages %p msg %p done\n", con, msg);
+ dout("write_partial_msg_pages %p msg %p done\n", con, msg);
/* prepare and queue up footer, too */
if (!crc)
*/
static void prepare_read_connect(struct ceph_connection *con)
{
- dout(10, "prepare_read_connect %p\n", con);
+ dout("prepare_read_connect %p\n", con);
con->in_base_pos = 0;
}
static void prepare_read_ack(struct ceph_connection *con)
{
- dout(10, "prepare_read_ack %p\n", con);
+ dout("prepare_read_ack %p\n", con);
con->in_base_pos = 0;
}
static void prepare_read_tag(struct ceph_connection *con)
{
- dout(10, "prepare_read_tag %p\n", con);
+ dout("prepare_read_tag %p\n", con);
con->in_base_pos = 0;
con->in_tag = CEPH_MSGR_TAG_READY;
}
{
int err;
- dout(10, "prepare_read_message %p\n", con);
+ dout("prepare_read_message %p\n", con);
con->in_base_pos = 0;
BUG_ON(con->in_msg != NULL);
con->in_msg = ceph_msg_new(0, 0, 0, 0, NULL);
{
int ret, to = 0;
- dout(20, "read_partial_connect %p at %d\n", con, con->in_base_pos);
+ dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
/* peer's banner */
ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
if (ret <= 0)
goto out;
- dout(20, "read_partial_connect %p connect_seq = %u, global_seq = %u\n",
+ dout("read_partial_connect %p connect_seq = %u, global_seq = %u\n",
con, le32_to_cpu(con->in_reply.connect_seq),
le32_to_cpu(con->in_reply.global_seq));
out:
static int process_connect(struct ceph_connection *con)
{
- dout(20, "process_connect on %p tag %d\n", con, (int)con->in_tag);
+ dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
if (verify_hello(con) < 0)
return -1;
* that they must have reset their session, and may have
* dropped messages.
*/
- dout(10, "process_connect got RESET peer seq %u\n",
+ dout("process_connect got RESET peer seq %u\n",
le32_to_cpu(con->in_connect.connect_seq));
reset_connection(con);
prepare_write_connect_retry(con->msgr, con);
* If we sent a smaller connect_seq than the peer has, try
* again with a larger value.
*/
- dout(10,
- "process_connect got RETRY my seq = %u, peer_seq = %u\n",
+ dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
le32_to_cpu(con->out_connect.connect_seq),
le32_to_cpu(con->in_connect.connect_seq));
con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
* If we sent a smaller global_seq than the peer has, try
* again with a larger value.
*/
- dout(10,
- "process_connect got RETRY_GLOBAL my %u, peer_gseq = %u\n",
+ dout("process_connect got RETRY_GLOBAL my %u, peer_gseq = %u\n",
con->peer_global_seq,
le32_to_cpu(con->in_connect.global_seq));
get_global_seq(con->msgr,
* our queued messages, in expectation of being replaced by an
* incoming connection.
*/
- dout(10, "process_connect peer connecting WAIT\n");
+ dout("process_connect peer connecting WAIT\n");
set_bit(WAIT, &con->state);
con_close_socket(con);
break;
set_bit(LOSSYRX, &con->state);
con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
con->connect_seq++;
- dout(10, "process_connect got READY gseq %d cseq %d (%d)\n",
+ dout("process_connect got READY gseq %d cseq %d (%d)\n",
con->peer_global_seq,
le32_to_cpu(con->in_reply.connect_seq),
con->connect_seq);
bool retry = true;
bool replace = false;
- dout(10, "process_accept %p got gseq %d cseq %d\n", con,
+ dout("process_accept %p got gseq %d cseq %d\n", con,
peer_gseq, peer_cseq);
if (verify_hello(con) < 0)
goto reply;
}
if (test_bit(LOSSYTX, &existing->state)) {
- dout(20, "process_accept %p replacing LOSSYTX %p\n",
+ dout("process_accept %p replacing LOSSYTX %p\n",
con, existing);
replace = true;
goto accept;
if (peer_cseq == existing->connect_seq) {
/* connection race */
- dout(20, "process_accept connection race state = %lu\n",
+ dout("process_accept connection race state = %lu\n",
con->state);
if (ceph_entity_addr_equal(&msgr->inst.addr,
&con->peer_addr)) {
}
if (peer_cseq == 0) {
- dout(20, "process_accept no existing connection, opening\n");
+ dout("process_accept no existing connection, opening\n");
goto accept;
} else {
- dout(20, "process_accept no existing connection, we reset\n");
+ dout("process_accept no existing connection, we reset\n");
con->out_reply.tag = CEPH_MSGR_TAG_RESETSESSION;
goto reply;
}
/* accept this connection */
con->connect_seq = peer_cseq + 1;
con->peer_global_seq = peer_gseq;
- dout(10, "process_accept %p cseq %d peer_gseq %d %s\n", con,
+ dout("process_accept %p cseq %d peer_gseq %d %s\n", con,
con->connect_seq, peer_gseq, replace ? "replace" : "new");
con->out_reply.tag = CEPH_MSGR_TAG_READY;
seq = le64_to_cpu(m->hdr.seq);
if (seq > ack)
break;
- dout(5, "got ack for seq %llu type %d at %p\n", seq,
+ dout("got ack for seq %llu type %d at %p\n", seq,
le16_to_cpu(m->hdr.type), m);
ceph_msg_remove(m);
}
struct ceph_client *client = con->msgr->parent;
int datacrc = !ceph_test_opt(client, NOCRC);
- dout(20, "read_partial_message con %p msg %p\n", con, m);
+ dout("read_partial_message con %p msg %p\n", con, m);
/* header */
while (con->in_base_pos < sizeof(m->hdr)) {
BUG_ON(!con->msgr->prepare_pages);
ret = con->msgr->prepare_pages(con->msgr->parent, m, want);
if (ret < 0) {
- dout(10, "prepare_pages failed, skipping payload\n");
+ dout("prepare_pages failed, skipping payload\n");
con->in_base_pos = -data_len - sizeof(m->footer);
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
(int)(PAGE_SIZE - con->in_msg_pos.page_pos));
mutex_lock(&m->page_mutex);
if (!m->pages) {
- dout(10, "pages revoked during msg read\n");
+ dout("pages revoked during msg read\n");
mutex_unlock(&m->page_mutex);
con->in_base_pos = con->in_msg_pos.data_pos - data_len -
sizeof(m->footer);
return ret;
con->in_base_pos += ret;
}
- dout(20, "read_partial_message got msg %p\n", m);
+ dout("read_partial_message got msg %p\n", m);
/* crc ok? */
if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
* safe.
*/
con->msgr->inst.addr.ipaddr = con->in_msg->hdr.dst.addr.ipaddr;
- dout(10, "read_partial_message learned my addr is "
+ dout("read_partial_message learned my addr is "
"%u.%u.%u.%u:%u\n",
IPQUADPORT(con->msgr->inst.addr.ipaddr));
}
con->in_seq++;
spin_unlock(&con->out_queue_lock);
- dout(1, "===== %p %llu from %s%d %d=%s len %d+%d (%u %u) =====\n",
+ dout("===== %p %llu from %s%d %d=%s len %d+%d (%u %u) =====\n",
con->in_msg, le64_to_cpu(con->in_msg->hdr.seq),
ENTITY_NAME(con->in_msg->hdr.src.name),
le16_to_cpu(con->in_msg->hdr.type),
struct ceph_messenger *msgr = con->msgr;
int ret = 1;
- dout(30, "try_write start %p state %lu nref %d\n", con, con->state,
+ dout("try_write start %p state %lu nref %d\n", con, con->state,
atomic_read(&con->nref));
more:
- dout(30, "try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
+ dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
/* open the socket first? */
if (con->sock == NULL) {
set_bit(CONNECTING, &con->state);
con->in_tag = CEPH_MSGR_TAG_READY;
- dout(5, "try_write initiating connect on %p new state %lu\n",
+ dout("try_write initiating connect on %p new state %lu\n",
con, con->state);
con->sock = ceph_tcp_connect(con);
if (IS_ERR(con->sock)) {
if (ret <= 0)
goto done;
if (ret < 0) {
- dout(30, "try_write write_partial_kvec err %d\n", ret);
+ dout("try_write write_partial_kvec err %d\n", ret);
goto done;
}
}
if (ret == 0)
goto done;
if (ret < 0) {
- dout(30, "try_write write_partial_msg_pages err %d\n",
+ dout("try_write write_partial_msg_pages err %d\n",
ret);
goto done;
}
/* Nothing to do! */
clear_bit(WRITE_PENDING, &con->state);
- dout(30, "try_write nothing else to write.\n");
+ dout("try_write nothing else to write.\n");
done:
ret = 0;
out:
- dout(30, "try_write done on %p\n", con);
+ dout("try_write done on %p\n", con);
return ret;
}
if (test_bit(STANDBY, &con->state))
return 0;
- dout(20, "try_read start on %p\n", con);
+ dout("try_read start on %p\n", con);
msgr = con->msgr;
more:
- dout(20, "try_read tag %d in_base_pos %d\n", (int)con->in_tag,
+ dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
con->in_base_pos);
if (test_bit(ACCEPTING, &con->state)) {
- dout(20, "try_read accepting\n");
+ dout("try_read accepting\n");
ret = read_partial_accept(con);
if (ret <= 0)
goto done;
goto more;
}
if (test_bit(CONNECTING, &con->state)) {
- dout(20, "try_read connecting\n");
+ dout("try_read connecting\n");
ret = read_partial_connect(con);
if (ret <= 0)
goto done;
*/
static char buf[1024];
int skip = min(1024, -con->in_base_pos);
- dout(20, "skipping %d / %d bytes\n", skip, -con->in_base_pos);
+ dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
ret = ceph_tcp_recvmsg(con->sock, buf, skip);
if (ret <= 0)
goto done;
ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
if (ret <= 0)
goto done;
- dout(30, "try_read got tag %d\n", (int)con->in_tag);
+ dout("try_read got tag %d\n", (int)con->in_tag);
switch (con->in_tag) {
case CEPH_MSGR_TAG_MSG:
prepare_read_message(con);
done:
ret = 0;
out:
- dout(20, "try_read done on %p\n", con);
+ dout("try_read done on %p\n", con);
return ret;
bad_tag:
{
if (test_bit(WAIT, &con->state) ||
test_bit(CLOSED, &con->state)) {
- dout(40, "ceph_queue_con %p ignoring: WAIT|CLOSED\n",
+ dout("ceph_queue_con %p ignoring: WAIT|CLOSED\n",
con);
return;
}
atomic_inc(&con->nref);
- dout(40, "ceph_queue_con %p %d -> %d\n", con,
+ dout("ceph_queue_con %p %d -> %d\n", con,
atomic_read(&con->nref) - 1, atomic_read(&con->nref));
set_bit(QUEUED, &con->state);
if (test_bit(BUSY, &con->state) ||
!queue_work(ceph_msgr_wq, &con->work.work)) {
- dout(40, "ceph_queue_con %p - already BUSY or queued\n", con);
+ dout("ceph_queue_con %p - already BUSY or queued\n", con);
put_connection(con);
}
}
more:
if (test_and_set_bit(BUSY, &con->state) != 0) {
- dout(10, "con_work %p BUSY already set\n", con);
+ dout("con_work %p BUSY already set\n", con);
goto out;
}
- dout(10, "con_work %p start, clearing QUEUED\n", con);
+ dout("con_work %p start, clearing QUEUED\n", con);
clear_bit(QUEUED, &con->state);
if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
- dout(5, "con_work CLOSED\n");
+ dout("con_work CLOSED\n");
goto done;
}
if (test_bit(WAIT, &con->state)) { /* we are a zombie */
- dout(5, "con_work WAIT\n");
+ dout("con_work WAIT\n");
goto done;
}
done:
clear_bit(BUSY, &con->state);
- dout(10, "con->state=%lu\n", con->state);
+ dout("con->state=%lu\n", con->state);
if (test_bit(QUEUED, &con->state)) {
if (!backoff) {
- dout(10, "con_work %p QUEUED reset, looping\n", con);
+ dout("con_work %p QUEUED reset, looping\n", con);
goto more;
}
- dout(10, "con_work %p QUEUED reset, but just faulted\n", con);
+ dout("con_work %p QUEUED reset, but just faulted\n", con);
clear_bit(QUEUED, &con->state);
}
- dout(10, "con_work %p done\n", con);
+ dout("con_work %p done\n", con);
out:
put_connection(con);
{
pr_err("ceph %s%d %u.%u.%u.%u:%u %s\n", ENTITY_NAME(con->peer_name),
IPQUADPORT(con->peer_addr.ipaddr), con->error_msg);
- dout(10, "fault %p state %lu to peer %u.%u.%u.%u:%u\n",
+ dout("fault %p state %lu to peer %u.%u.%u.%u:%u\n",
con, con->state, IPQUADPORT(con->peer_addr.ipaddr));
if (test_bit(LOSSYTX, &con->state)) {
- dout(30, "fault on LOSSYTX channel\n");
+ dout("fault on LOSSYTX channel\n");
remove_connection(con->msgr, con);
return;
}
* in a STANDBY state (i.e., don't try to reconnect just yet). */
spin_lock(&con->out_queue_lock);
if (list_empty(&con->out_queue)) {
- dout(10, "fault setting STANDBY\n");
+ dout("fault setting STANDBY\n");
set_bit(STANDBY, &con->state);
spin_unlock(&con->out_queue_lock);
return;
con->delay *= 2;
/* explicitly schedule work to try to reconnect again later. */
- dout(40, "fault queueing %p %d -> %d delay %lu\n", con,
+ dout("fault queueing %p %d -> %d delay %lu\n", con,
atomic_read(&con->nref), atomic_read(&con->nref) + 1,
con->delay);
atomic_inc(&con->nref);
put_connection(newcon);
return;
}
- dout(5, "accepted connection \n");
+ dout("accepted connection \n");
prepare_write_accept_hello(msgr, newcon);
add_connection_accepting(msgr, newcon);
if (myaddr) {
msgr->inst.addr = *myaddr;
} else {
- dout(10, "create my ip not specified, binding to INADDR_ANY\n");
+ dout("create my ip not specified, binding to INADDR_ANY\n");
msgr->inst.addr.ipaddr.sin_addr.s_addr = htonl(INADDR_ANY);
msgr->inst.addr.ipaddr.sin_port = htons(0); /* any port */
}
if (myaddr)
msgr->inst.addr.ipaddr.sin_addr = myaddr->ipaddr.sin_addr;
- dout(1, "messenger %p listening on %u.%u.%u.%u:%u\n", msgr,
+ dout("messenger %p listening on %u.%u.%u.%u:%u\n", msgr,
IPQUADPORT(msgr->inst.addr.ipaddr));
return msgr;
}
{
struct ceph_connection *con;
- dout(2, "destroy %p\n", msgr);
+ dout("destroy %p\n", msgr);
/* stop listener */
msgr->listen_sock->ops->shutdown(msgr->listen_sock, SHUT_RDWR);
while (!list_empty(&msgr->con_all)) {
con = list_first_entry(&msgr->con_all, struct ceph_connection,
list_all);
- dout(10, "destroy removing connection %p\n", con);
+ dout("destroy removing connection %p\n", con);
set_bit(CLOSED, &con->state);
atomic_inc(&con->nref);
- dout(40, " get %p %d -> %d\n", con,
+ dout(" get %p %d -> %d\n", con,
atomic_read(&con->nref) - 1, atomic_read(&con->nref));
__remove_connection(msgr, con);
if (cancel_delayed_work_sync(&con->work))
put_connection(con);
put_connection(con);
- dout(10, "destroy removed connection %p\n", con);
+ dout("destroy removed connection %p\n", con);
spin_lock(&msgr->con_lock);
}
__free_page(msgr->zero_page);
kfree(msgr);
- dout(10, "destroyed messenger %p\n", msgr);
+ dout("destroyed messenger %p\n", msgr);
}
/*
{
struct ceph_connection *con;
- dout(2, "mark_down peer %u.%u.%u.%u:%u\n",
+ dout("mark_down peer %u.%u.%u.%u:%u\n",
IPQUADPORT(addr->ipaddr));
spin_lock(&msgr->con_lock);
con = __get_connection(msgr, addr);
if (con) {
- dout(1, "mark_down %s%d %u.%u.%u.%u:%u (%p)\n",
+ dout("mark_down %s%d %u.%u.%u.%u:%u (%p)\n",
ENTITY_NAME(con->peer_name),
IPQUADPORT(con->peer_addr.ipaddr), con);
set_bit(CLOSED, &con->state); /* in case there's queued work */
newcon->out_connect.flags = 0;
if (!timeout) {
- dout(10, "ceph_msg_send setting LOSSYTX\n");
+ dout("ceph_msg_send setting LOSSYTX\n");
newcon->out_connect.flags |= CEPH_MSG_CONNECT_LOSSY;
set_bit(LOSSYTX, &newcon->state);
}
con = __get_connection(msgr, &msg->hdr.dst.addr);
if (con) {
put_connection(newcon);
- dout(10, "ceph_msg_send (lost race and) had connection "
+ dout("ceph_msg_send (lost race and) had connection "
"%p to peer %u.%u.%u.%u:%u\n", con,
IPQUADPORT(msg->hdr.dst.addr.ipaddr));
} else {
con->peer_addr = msg->hdr.dst.addr;
con->peer_name = msg->hdr.dst.name;
__register_connection(msgr, con);
- dout(5, "ceph_msg_send new connection %p to peer "
+ dout("ceph_msg_send new connection %p to peer "
"%u.%u.%u.%u:%u\n", con,
IPQUADPORT(msg->hdr.dst.addr.ipaddr));
}
spin_unlock(&msgr->con_lock);
radix_tree_preload_end();
} else {
- dout(10, "ceph_msg_send had connection %p to peer "
+ dout("ceph_msg_send had connection %p to peer "
"%u.%u.%u.%u:%u con->sock=%p\n", con,
IPQUADPORT(msg->hdr.dst.addr.ipaddr), con->sock);
spin_unlock(&msgr->con_lock);
le16_to_cpu(list_entry(con->out_queue.prev,
struct ceph_msg,
list_head)->hdr.type) == CEPH_MSG_PING)) {
- dout(2, "ceph_msg_send dropping dup ping\n");
+ dout("ceph_msg_send dropping dup ping\n");
ceph_msg_put(msg);
} else {
msg->hdr.seq = cpu_to_le64(++con->out_seq);
- dout(1, "----- %p %u to %s%d %d=%s len %d+%d -----\n", msg,
+ dout("----- %p %u to %s%d %d=%s len %d+%d -----\n", msg,
(unsigned)con->out_seq,
ENTITY_NAME(msg->hdr.dst.name), le16_to_cpu(msg->hdr.type),
ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
le32_to_cpu(msg->hdr.front_len),
le32_to_cpu(msg->hdr.data_len));
- dout(2, "ceph_msg_send %p seq %llu for %s%d on %p pgs %d\n",
+ dout("ceph_msg_send %p seq %llu for %s%d on %p pgs %d\n",
msg, le64_to_cpu(msg->hdr.seq),
ENTITY_NAME(msg->hdr.dst.name), con, msg->nr_pages);
list_add_tail(&msg->list_head, &con->out_queue);
ceph_queue_con(con);
put_connection(con);
- dout(30, "ceph_msg_send done\n");
+ dout("ceph_msg_send done\n");
return ret;
}
m->nr_pages = calc_pages_for(page_off, page_len);
m->pages = pages;
- dout(20, "ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
+ dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
m->nr_pages);
return m;
void ceph_msg_put(struct ceph_msg *m)
{
- dout(20, "ceph_msg_put %p %d -> %d\n", m, atomic_read(&m->nref),
+ dout("ceph_msg_put %p %d -> %d\n", m, atomic_read(&m->nref),
atomic_read(&m->nref)-1);
if (atomic_read(&m->nref) <= 0) {
pr_err("bad ceph_msg_put on %p %llu %s%d->%s%d %d=%s %d+%d\n",
WARN_ON(1);
}
if (atomic_dec_and_test(&m->nref)) {
- dout(20, "ceph_msg_put last one on %p\n", m);
+ dout("ceph_msg_put last one on %p\n", m);
WARN_ON(!list_empty(&m->list_head));
if (m->front_is_vmalloc)
vfree(m->front.iov_base);
#include "mon_client.h"
#include "ceph_debug.h"
-
-int ceph_debug_mon __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_MON
-#define DOUT_VAR ceph_debug_mon
#include "super.h"
#include "decode.h"
int i, err = -EINVAL;
ceph_fsid_t fsid;
- dout(30, "monmap_decode %p %p len %d\n", p, end, (int)(end-p));
+ dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
/* The encoded and decoded sizes match. */
m = kmalloc(end-p, GFP_NOFS);
if (p != end)
goto bad;
- dout(30, "monmap_decode epoch %d, num_mon %d\n", m->epoch,
+ dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
m->num_mon);
for (i = 0; i < m->num_mon; i++)
- dout(30, "monmap_decode mon%d is %u.%u.%u.%u:%u\n", i,
+ dout("monmap_decode mon%d is %u.%u.%u.%u:%u\n", i,
IPQUADPORT(m->mon_inst[i].addr.ipaddr));
return m;
bad:
- dout(30, "monmap_decode failed with %d\n", err);
+ dout("monmap_decode failed with %d\n", err);
kfree(m);
return ERR_PTR(err);
}
struct ceph_mds_getmap *h;
int mon = pick_mon(monc, newmon);
- dout(5, "request_mdsmap from mon%d want %u\n", mon, monc->want_mdsmap);
+ dout("request_mdsmap from mon%d want %u\n", mon, monc->want_mdsmap);
msg = ceph_msg_new(CEPH_MSG_MDS_GETMAP, sizeof(*h), 0, 0, NULL);
if (IS_ERR(msg))
return;
*/
void ceph_monc_request_mdsmap(struct ceph_mon_client *monc, u32 want)
{
- dout(5, "request_mdsmap want %u\n", want);
+ dout("request_mdsmap want %u\n", want);
mutex_lock(&monc->req_mutex);
if (want > monc->want_mdsmap) {
monc->want_mdsmap = want;
mutex_lock(&monc->req_mutex);
if (got < monc->want_mdsmap) {
- dout(5, "got_mdsmap %u < wanted %u\n", got, monc->want_mdsmap);
+ dout("got_mdsmap %u < wanted %u\n", got, monc->want_mdsmap);
ret = -EAGAIN;
} else {
- dout(5, "got_mdsmap %u >= wanted %u\n", got, monc->want_mdsmap);
+ dout("got_mdsmap %u >= wanted %u\n", got, monc->want_mdsmap);
monc->want_mdsmap = 0;
cancel_timeout(&monc->mdsreq);
}
struct ceph_osd_getmap *h;
int mon = pick_mon(monc, newmon);
- dout(5, "request_osdmap from mon%d want %u\n", mon, monc->want_osdmap);
+ dout("request_osdmap from mon%d want %u\n", mon, monc->want_osdmap);
msg = ceph_msg_new(CEPH_MSG_OSD_GETMAP, sizeof(*h), 0, 0, NULL);
if (IS_ERR(msg))
return;
void ceph_monc_request_osdmap(struct ceph_mon_client *monc, u32 want)
{
- dout(5, "request_osdmap want %u\n", want);
+ dout("request_osdmap want %u\n", want);
mutex_lock(&monc->req_mutex);
monc->osdreq.delay = BASE_DELAY_INTERVAL;
monc->want_osdmap = want;
mutex_lock(&monc->req_mutex);
if (got < monc->want_osdmap) {
- dout(5, "got_osdmap %u < wanted %u\n", got, monc->want_osdmap);
+ dout("got_osdmap %u < wanted %u\n", got, monc->want_osdmap);
ret = -EAGAIN;
} else {
- dout(5, "got_osdmap %u >= wanted %u\n", got, monc->want_osdmap);
+ dout("got_osdmap %u >= wanted %u\n", got, monc->want_osdmap);
monc->want_osdmap = 0;
cancel_timeout(&monc->osdreq);
}
int mon = pick_mon(monc, newmon);
struct ceph_client_mount *h;
- dout(5, "request_umount from mon%d\n", mon);
+ dout("request_umount from mon%d\n", mon);
msg = ceph_msg_new(CEPH_MSG_CLIENT_UNMOUNT, sizeof(*h), 0, 0, NULL);
if (IS_ERR(msg))
return;
void ceph_monc_handle_umount(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
- dout(5, "handle_umount\n");
+ dout("handle_umount\n");
mutex_lock(&monc->req_mutex);
cancel_timeout(&monc->umountreq);
monc->client->mount_state = CEPH_MOUNT_UNMOUNTED;
if (msg->front.iov_len != sizeof(*reply))
goto bad;
tid = le64_to_cpu(reply->tid);
- dout(10, "handle_statfs_reply %p tid %llu\n", msg, tid);
+ dout("handle_statfs_reply %p tid %llu\n", msg, tid);
mutex_lock(&monc->statfs_mutex);
req = radix_tree_lookup(&monc->statfs_request_tree, tid);
struct ceph_mon_statfs *h;
int mon = pick_mon(monc, newmon ? 1 : -1);
- dout(10, "send_statfs to mon%d tid %llu\n", mon, req->tid);
+ dout("send_statfs to mon%d tid %llu\n", mon, req->tid);
msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
if (IS_ERR(msg))
return PTR_ERR(msg);
int newmon = 1;
struct ceph_mon_statfs_request *req;
- dout(10, "do_statfs_check\n");
+ dout("do_statfs_check\n");
mutex_lock(&monc->statfs_mutex);
while (1) {
got = radix_tree_gang_lookup(&monc->statfs_request_tree,
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
- dout(5, "init\n");
+ dout("init\n");
memset(monc, 0, sizeof(*monc));
monc->client = cl;
monc->monmap = kzalloc(sizeof(struct ceph_monmap) +
void ceph_monc_stop(struct ceph_mon_client *monc)
{
- dout(5, "stop\n");
+ dout("stop\n");
cancel_timeout(&monc->mdsreq);
cancel_timeout(&monc->osdreq);
cancel_timeout(&monc->umountreq);
#include <linux/uaccess.h>
#include "ceph_debug.h"
-
-int ceph_debug_osdc __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_OSDC
-#define DOUT_VAR ceph_debug_osdc
#include "super.h"
-
#include "osd_client.h"
#include "messenger.h"
#include "crush/mapper.h"
ceph_calc_file_object_mapping(layout, off, plen, &bno,
&objoff, &objlen);
if (*plen < orig_len)
- dout(10, " skipping last %llu, final file extent %llu~%llu\n",
+ dout(" skipping last %llu, final file extent %llu~%llu\n",
orig_len - *plen, off, *plen);
sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
op->length = cpu_to_le64(objlen);
req->r_num_pages = calc_pages_for(off, *plen);
- dout(10, "calc_layout %s (%d) %llu~%llu (%d pages)\n",
+ dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}
*/
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
- dout(10, "put_request %p %d -> %d\n", req, atomic_read(&req->r_ref),
+ dout("put_request %p %d -> %d\n", req, atomic_read(&req->r_ref),
atomic_read(&req->r_ref)-1);
BUG_ON(atomic_read(&req->r_ref) <= 0);
if (atomic_dec_and_test(&req->r_ref)) {
req->r_tid = ++osdc->last_tid;
head->tid = cpu_to_le64(req->r_tid);
- dout(30, "register_request %p tid %lld\n", req, req->r_tid);
+ dout("register_request %p tid %lld\n", req, req->r_tid);
rc = radix_tree_insert(&osdc->request_tree, req->r_tid, (void *)req);
if (rc < 0)
goto out;
if (osdc->num_requests == 1) {
osdc->timeout_tid = req->r_tid;
- dout(30, " timeout on tid %llu at %lu\n", req->r_tid,
+ dout(" timeout on tid %llu at %lu\n", req->r_tid,
req->r_timeout_stamp);
schedule_delayed_work(&osdc->timeout_work,
round_jiffies_relative(req->r_timeout_stamp - jiffies));
u64 next_tid = 0;
int got;
- dout(10, "timeout\n");
+ dout("timeout\n");
down_read(&osdc->map_sem);
ceph_monc_request_osdmap(&osdc->client->monc, osdc->osdmap->epoch+1);
.type = cpu_to_le32(CEPH_ENTITY_TYPE_OSD),
.num = cpu_to_le32(req->r_last_osd)
};
- dout(20, " tid %llu (at least) timed out on osd%d\n",
+ dout(" tid %llu (at least) timed out on osd%d\n",
req->r_tid, req->r_last_osd);
radix_tree_insert(&pings, req->r_last_osd, req);
ceph_ping(osdc->client->msgr, n, &req->r_last_osd_addr);
static void __unregister_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
- dout(30, "__unregister_request %p tid %lld\n", req, req->r_tid);
+ dout("__unregister_request %p tid %lld\n", req, req->r_tid);
radix_tree_delete(&osdc->request_tree, req->r_tid);
osdc->num_requests--;
if (req->r_tid == osdc->timeout_tid) {
if (osdc->num_requests == 0) {
- dout(30, "no requests, canceling timeout\n");
+ dout("no requests, canceling timeout\n");
osdc->timeout_tid = 0;
cancel_delayed_work(&osdc->timeout_work);
} else {
(void **)&req, 0, 1);
BUG_ON(ret != 1);
osdc->timeout_tid = req->r_tid;
- dout(30, "rescheduled timeout on tid %llu at %lu\n",
+ dout("rescheduled timeout on tid %llu at %lu\n",
req->r_tid, req->r_timeout_stamp);
schedule_delayed_work(&osdc->timeout_work,
round_jiffies_relative(req->r_timeout_stamp -
osd = osds[i];
break;
}
- dout(20, "map_osds tid %llu pgid %llx pool %d osd%d (was osd%d)\n",
+ dout("map_osds tid %llu pgid %llx pool %d osd%d (was osd%d)\n",
req->r_tid, pgid.pg64, pgid.pg.pool, osd, req->r_last_osd);
if (req->r_last_osd == osd &&
(osd < 0 || ceph_entity_addr_equal(&osdc->osdmap->osd_addr[osd],
map_osds(osdc, req);
if (req->r_last_osd < 0) {
- dout(10, "send_request %p no up osds in pg\n", req);
+ dout("send_request %p no up osds in pg\n", req);
ceph_monc_request_osdmap(&osdc->client->monc,
osdc->osdmap->epoch+1);
return 0;
}
osd = req->r_last_osd;
- dout(10, "send_request %p tid %llu to osd%d flags %d\n",
+ dout("send_request %p tid %llu to osd%d flags %d\n",
req, req->r_tid, osd, req->r_flags);
reqhead = req->r_request->front.iov_base;
if (msg->front.iov_len != sizeof(*rhead) + object_len +
numops * sizeof(struct ceph_osd_op))
goto bad;
- dout(10, "handle_reply %p tid %llu\n", msg, tid);
+ dout("handle_reply %p tid %llu\n", msg, tid);
/* lookup */
mutex_lock(&osdc->request_mutex);
req = radix_tree_lookup(&osdc->request_tree, tid);
if (req == NULL) {
- dout(10, "handle_reply tid %llu dne\n", tid);
+ dout("handle_reply tid %llu dne\n", tid);
mutex_unlock(&osdc->request_mutex);
return;
}
flags = le32_to_cpu(rhead->flags);
if (req->r_aborted) {
- dout(10, "handle_reply tid %llu aborted\n", tid);
+ dout("handle_reply tid %llu aborted\n", tid);
goto done;
}
/* in case we need to replay this op, */
req->r_reassert_version = rhead->reassert_version;
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
- dout(10, "handle_reply tid %llu dup ack\n", tid);
+ dout("handle_reply tid %llu dup ack\n", tid);
goto done;
}
- dout(10, "handle_reply tid %llu flags %d\n", tid, flags);
+ dout("handle_reply tid %llu flags %d\n", tid, flags);
/* either this is a read, or we got the safe response */
if ((flags & CEPH_OSD_FLAG_ONDISK) ||
continue; /* no change */
if (req->r_last_osd < 0) {
- dout(20, "tid %llu maps to no valid osd\n", req->r_tid);
+ dout("tid %llu maps to no valid osd\n", req->r_tid);
needmap++; /* request a newer map */
memset(&req->r_last_osd_addr, 0,
sizeof(req->r_last_osd_addr));
}
kick:
- dout(20, "kicking tid %llu osd%d\n", req->r_tid,
+ dout("kicking tid %llu osd%d\n", req->r_tid,
req->r_last_osd);
ceph_osdc_get_request(req);
mutex_unlock(&osdc->request_mutex);
mutex_unlock(&osdc->request_mutex);
if (needmap) {
- dout(10, "%d requests for down osds, need new map\n", needmap);
+ dout("%d requests for down osds, need new map\n", needmap);
ceph_monc_request_osdmap(&osdc->client->monc,
osdc->osdmap->epoch+1);
}
int err;
ceph_fsid_t fsid;
- dout(2, "handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
+ dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
p = msg->front.iov_base;
end = p + msg->front.iov_len;
/* incremental maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
- dout(10, " %d inc maps\n", nr_maps);
+ dout(" %d inc maps\n", nr_maps);
while (nr_maps > 0) {
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
ceph_decode_32(&p, epoch);
ceph_decode_need(&p, end, maplen, bad);
next = p + maplen;
if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
- dout(10, "applying incremental map %u len %d\n",
+ dout("applying incremental map %u len %d\n",
epoch, maplen);
newmap = apply_incremental(&p, next, osdc->osdmap,
osdc->client->msgr);
osdc->osdmap = newmap;
}
} else {
- dout(10, "ignoring incremental map %u len %d\n",
+ dout("ignoring incremental map %u len %d\n",
epoch, maplen);
}
p = next;
/* full maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
- dout(30, " %d full maps\n", nr_maps);
+ dout(" %d full maps\n", nr_maps);
while (nr_maps) {
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
ceph_decode_32(&p, epoch);
ceph_decode_32(&p, maplen);
ceph_decode_need(&p, end, maplen, bad);
if (nr_maps > 1) {
- dout(5, "skipping non-latest full map %u len %d\n",
+ dout("skipping non-latest full map %u len %d\n",
epoch, maplen);
} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
- dout(10, "skipping full map %u len %d, "
+ dout("skipping full map %u len %d, "
"older than our %u\n", epoch, maplen,
osdc->osdmap->epoch);
} else {
- dout(10, "taking full map %u len %d\n", epoch, maplen);
+ dout("taking full map %u len %d\n", epoch, maplen);
newmap = osdmap_decode(&p, p+maplen);
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
int ret = -1;
int type = le16_to_cpu(m->hdr.type);
- dout(10, "prepare_pages on msg %p want %d\n", m, want);
+ dout("prepare_pages on msg %p want %d\n", m, want);
if (unlikely(type != CEPH_MSG_OSD_OPREPLY))
return -1; /* hmm! */
mutex_lock(&osdc->request_mutex);
req = radix_tree_lookup(&osdc->request_tree, tid);
if (!req) {
- dout(10, "prepare_pages unknown tid %llu\n", tid);
+ dout("prepare_pages unknown tid %llu\n", tid);
goto out;
}
- dout(10, "prepare_pages tid %llu has %d pages, want %d\n",
+ dout("prepare_pages tid %llu has %d pages, want %d\n",
tid, req->r_num_pages, want);
if (likely(req->r_num_pages >= want && req->r_reply == NULL &&
!req->r_aborted)) {
replyhead = req->r_reply->front.iov_base;
rc = le32_to_cpu(replyhead->result);
bytes = le32_to_cpu(req->r_reply->hdr.data_len);
- dout(10, "wait_request tid %llu result %d, %d bytes\n",
+ dout("wait_request tid %llu result %d, %d bytes\n",
req->r_tid, rc, bytes);
if (rc < 0)
return rc;
{
struct ceph_msg *msg;
- dout(10, "abort_request tid %llu, revoking %p pages\n", req->r_tid,
+ dout("abort_request tid %llu, revoking %p pages\n", req->r_tid,
req->r_request);
/*
* mark req aborted _before_ revoking pages, so that
ceph_osdc_get_request(req);
mutex_unlock(&osdc->request_mutex);
- dout(10, "sync waiting on tid %llu (last is %llu)\n",
+ dout("sync waiting on tid %llu (last is %llu)\n",
req->r_tid, last_tid);
wait_for_completion(&req->r_safe_completion);
mutex_lock(&osdc->request_mutex);
ceph_osdc_put_request(req);
}
mutex_unlock(&osdc->request_mutex);
- dout(10, "sync done (thru tid %llu)\n", last_tid);
+ dout("sync done (thru tid %llu)\n", last_tid);
}
/*
*/
void ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
- dout(5, "init\n");
+ dout("init\n");
osdc->client = client;
osdc->osdmap = NULL;
init_rwsem(&osdc->map_sem);
struct page *page;
int rc = 0, read = 0;
- dout(10, "readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
+ dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
vino.snap, off, len);
req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
num_pages = calc_pages_for(off, len);
req->r_num_pages = num_pages;
- dout(10, "readpages final extent is %llu~%llu (%d pages)\n",
+ dout("readpages final extent is %llu~%llu (%d pages)\n",
off, len, req->r_num_pages);
rc = ceph_osdc_start_request(osdc, req);
if (read & ~PAGE_CACHE_MASK) {
i = read >> PAGE_CACHE_SHIFT;
page = pages[i];
- dout(20, "readpages zeroing %d %p from %d\n", i, page,
+ dout("readpages zeroing %d %p from %d\n", i, page,
(int)(read & ~PAGE_CACHE_MASK));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
zero_user_segment(page, read & ~PAGE_CACHE_MASK,
}
for (i = read >> PAGE_CACHE_SHIFT; i < num_pages; i++) {
page = req->r_pages[i];
- dout(20, "readpages zeroing %d %p\n", i, page);
+ dout("readpages zeroing %d %p\n", i, page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
#else
}
ceph_osdc_put_request(req);
- dout(10, "readpages result %d\n", rc);
+ dout("readpages result %d\n", rc);
return rc;
}
/* it may be a short write due to an object boundary */
req->r_pages = pages;
req->r_num_pages = calc_pages_for(off, len);
- dout(10, "writepages %llu~%llu (%d pages)\n", off, len,
+ dout("writepages %llu~%llu (%d pages)\n", off, len,
req->r_num_pages);
rc = ceph_osdc_start_request(osdc, req);
ceph_osdc_put_request(req);
if (rc == 0)
rc = len;
- dout(10, "writepages result %d\n", rc);
+ dout("writepages result %d\n", rc);
return rc;
}
#include "osdmap.h"
#include "crush/hash.h"
#include "decode.h"
-
#include "ceph_debug.h"
-int ceph_debug_osdmap __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_OSDMAP
-#define DOUT_VAR ceph_debug_osdmap
-
char *ceph_osdmap_state_str(char *str, int len, int state)
{
static int crush_decode_uniform_bucket(void **p, void *end,
struct crush_bucket_uniform *b)
{
- dout(30, "crush_decode_uniform_bucket %p to %p\n", *p, end);
+ dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
ceph_decode_32(p, b->item_weight);
return 0;
struct crush_bucket_list *b)
{
int j;
- dout(30, "crush_decode_list_bucket %p to %p\n", *p, end);
+ dout("crush_decode_list_bucket %p to %p\n", *p, end);
b->item_weights = kmalloc(b->h.size * sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
return -ENOMEM;
struct crush_bucket_tree *b)
{
int j;
- dout(30, "crush_decode_tree_bucket %p to %p\n", *p, end);
+ dout("crush_decode_tree_bucket %p to %p\n", *p, end);
ceph_decode_32_safe(p, end, b->num_nodes, bad);
b->node_weights = kmalloc(b->num_nodes * sizeof(u32), GFP_NOFS);
if (b->node_weights == NULL)
struct crush_bucket_straw *b)
{
int j;
- dout(30, "crush_decode_straw_bucket %p to %p\n", *p, end);
+ dout("crush_decode_straw_bucket %p to %p\n", *p, end);
b->item_weights = kmalloc(b->h.size * sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
return -ENOMEM;
void *start = pbyval;
u32 magic;
- dout(30, "crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
+ dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
c = kzalloc(sizeof(*c), GFP_NOFS);
if (c == NULL)
c->buckets[i] = NULL;
continue;
}
- dout(30, "crush_decode bucket %d off %x %p to %p\n",
+ dout("crush_decode bucket %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
switch (alg) {
ceph_decode_32(p, b->weight);
ceph_decode_32(p, b->size);
- dout(30, "crush_decode bucket size %d off %x %p to %p\n",
+ dout("crush_decode bucket size %d off %x %p to %p\n",
b->size, (int)(*p-start), *p, end);
b->items = kmalloc(b->size * sizeof(__s32), GFP_NOFS);
}
/* rules */
- dout(30, "rule vec is %p\n", c->rules);
+ dout("rule vec is %p\n", c->rules);
for (i = 0; i < c->max_rules; i++) {
u32 yes;
struct crush_rule *r;
ceph_decode_32_safe(p, end, yes, bad);
if (!yes) {
- dout(30, "crush_decode NO rule %d off %x %p to %p\n",
+ dout("crush_decode NO rule %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
c->rules[i] = NULL;
continue;
}
- dout(30, "crush_decode rule %d off %x %p to %p\n",
+ dout("crush_decode rule %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
/* len */
GFP_NOFS);
if (r == NULL)
goto badmem;
- dout(30, " rule %d is at %p\n", i, r);
+ dout(" rule %d is at %p\n", i, r);
r->len = yes;
ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
/* ignore trailing name maps. */
- dout(30, "crush_decode success\n");
+ dout("crush_decode success\n");
return c;
badmem:
err = -ENOMEM;
bad:
- dout(30, "crush_decode fail %d\n", err);
+ dout("crush_decode fail %d\n", err);
crush_destroy(c);
return ERR_PTR(err);
}
*/
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
- dout(10, "osdmap_destroy %p\n", map);
+ dout("osdmap_destroy %p\n", map);
if (map->crush)
crush_destroy(map->crush);
kfree(map->osd_state);
int err = -EINVAL;
void *start = *p;
- dout(30, "osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));
+ dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));
map = kzalloc(sizeof(*map), GFP_NOFS);
if (map == NULL)
err = osdmap_set_max_osd(map, max);
if (err < 0)
goto bad;
- dout(30, "osdmap_decode max_osd = %d\n", map->max_osd);
+ dout("osdmap_decode max_osd = %d\n", map->max_osd);
/* osds */
err = -EINVAL;
/* crush */
ceph_decode_32_safe(p, end, len, bad);
- dout(30, "osdmap_decode crush len %d from off 0x%x\n", len,
+ dout("osdmap_decode crush len %d from off 0x%x\n", len,
(int)(*p - start));
ceph_decode_need(p, end, len, bad);
map->crush = crush_decode(*p, end);
/* ignore the rest of the map */
*p = end;
- dout(30, "osdmap_decode done %p %p\n", *p, end);
+ dout("osdmap_decode done %p %p\n", *p, end);
return map;
bad:
- dout(30, "osdmap_decode fail\n");
+ dout("osdmap_decode fail\n");
ceph_osdmap_destroy(map);
return ERR_PTR(err);
}
/* full map? */
ceph_decode_32_safe(p, end, len, bad);
if (len > 0) {
- dout(20, "apply_incremental full map len %d, %p to %p\n",
+ dout("apply_incremental full map len %d, %p to %p\n",
len, *p, end);
newmap = osdmap_decode(p, min(*p+len, end));
return newmap; /* error or not */
/* new crush? */
ceph_decode_32_safe(p, end, len, bad);
if (len > 0) {
- dout(20, "apply_incremental new crush map len %d, %p to %p\n",
+ dout("apply_incremental new crush map len %d, %p to %p\n",
len, *p, end);
newcrush = crush_decode(*p, min(*p+len, end));
if (IS_ERR(newcrush))
struct ceph_entity_addr addr;
ceph_decode_32_safe(p, end, osd, bad);
ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
- dout(1, "osd%d up\n", osd);
+ pr_info("ceph osd%d up\n", osd);
BUG_ON(osd >= map->max_osd);
map->osd_state[osd] |= CEPH_OSD_UP;
map->osd_addr[osd] = addr;
u32 osd;
ceph_decode_32_safe(p, end, osd, bad);
(*p)++; /* clean flag */
- dout(1, "osd%d down\n", osd);
+ pr_info("ceph osd%d down\n", osd);
if (osd < map->max_osd) {
map->osd_state[osd] &= ~CEPH_OSD_UP;
ceph_messenger_mark_down(msgr, &map->osd_addr[osd]);
ceph_decode_need(p, end, sizeof(u32)*2, bad);
ceph_decode_32(p, osd);
ceph_decode_32(p, off);
- dout(1, "osd%d weight 0x%x %s\n", osd, off,
+ pr_info("ceph osd%d weight 0x%x %s\n", osd, off,
off == CEPH_OSD_IN ? "(in)" :
(off == CEPH_OSD_OUT ? "(out)" : ""));
if (osd < map->max_osd)
u32 su_per_object;
u64 t;
- dout(80, "mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
+ dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
osize, su);
su_per_object = osize / le32_to_cpu(layout->fl_stripe_unit);
- dout(80, "osize %u / su %u = su_per_object %u\n", osize, su,
+ dout("osize %u / su %u = su_per_object %u\n", osize, su,
su_per_object);
BUG_ON((su & ~PAGE_MASK) != 0);
t = off;
do_div(t, su);
bl = t;
- dout(80, "off %llu / su %u = bl %u\n", off, su, bl);
+ dout("off %llu / su %u = bl %u\n", off, su, bl);
stripeno = bl / sc;
stripepos = bl % sc;
objsetno = stripeno / su_per_object;
*bno = cpu_to_le32(objsetno * sc + stripepos);
- dout(80, "objset %u * sc %u = bno %u\n", objsetno, sc, (unsigned)*bno);
+ dout("objset %u * sc %u = bno %u\n", objsetno, sc, (unsigned)*bno);
/* *oxoff = *off / layout->fl_stripe_unit; */
t = off;
*oxoff = do_div(t, su);
*oxlen = min_t(u64, *plen, su - *oxoff);
*plen = *oxlen;
- dout(80, " obj extent %llu~%llu\n", *oxoff, *oxlen);
+ dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
/*
#include <linux/sort.h>
#include "ceph_debug.h"
-
-int ceph_debug_snap __read_mostly = -1;
-#define DOUT_MASK DOUT_MASK_SNAP
-#define DOUT_VAR ceph_debug_snap
-
#include "super.h"
#include "decode.h"
void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
- dout(20, "get_realm %p %d -> %d\n", realm,
+ dout("get_realm %p %d -> %d\n", realm,
atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
/*
* since we _only_ increment realm refs or empty the empty
INIT_LIST_HEAD(&realm->empty_item);
INIT_LIST_HEAD(&realm->inodes_with_caps);
spin_lock_init(&realm->inodes_with_caps_lock);
- dout(20, "create_snap_realm %llx %p\n", realm->ino, realm);
+ dout("create_snap_realm %llx %p\n", realm->ino, realm);
return realm;
}
realm = radix_tree_lookup(&mdsc->snap_realms, ino);
if (realm)
- dout(20, "lookup_snap_realm %llx %p\n", realm->ino, realm);
+ dout("lookup_snap_realm %llx %p\n", realm->ino, realm);
return realm;
}
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
- dout(10, "__destroy_snap_realm %p %llx\n", realm, realm->ino);
+ dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
radix_tree_delete(&mdsc->snap_realms, realm->ino);
static void __put_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
- dout(20, "__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
+ dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
if (atomic_dec_and_test(&realm->nref))
__destroy_snap_realm(mdsc, realm);
void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
- dout(20, "put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
+ dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
if (!atomic_dec_and_test(&realm->nref))
return;
if (IS_ERR(parent))
return PTR_ERR(parent);
}
- dout(20, "adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
+ dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
realm->ino, realm, realm->parent_ino, realm->parent,
parentino, parent);
if (realm->parent) {
realm->cached_context->seq <= realm->seq &&
(!parent ||
realm->cached_context->seq <= parent->cached_context->seq)) {
- dout(10, "build_snap_context %llx %p: %p seq %lld (%d snaps)"
+ dout("build_snap_context %llx %p: %p seq %lld (%d snaps)"
" (unchanged)\n",
realm->ino, realm, realm->cached_context,
realm->cached_context->seq,
sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
snapc->num_snaps = num;
- dout(10, "build_snap_context %llx %p: %p seq %lld (%d snaps)\n",
+ dout("build_snap_context %llx %p: %p seq %lld (%d snaps)\n",
realm->ino, realm, snapc, snapc->seq, snapc->num_snaps);
if (realm->cached_context)
{
struct ceph_snap_realm *child;
- dout(10, "rebuild_snap_realms %llx %p\n", realm->ino, realm);
+ dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
build_snap_context(realm);
list_for_each_entry(child, &realm->children, child_item)
as no new writes are allowed to start when pending, so any
writes in progress now were started before the previous
cap_snap. lucky us. */
- dout(10, "queue_cap_snap %p snapc %p seq %llu used %d"
+ dout("queue_cap_snap %p snapc %p seq %llu used %d"
" already pending\n", inode, snapc, snapc->seq, used);
kfree(capsnap);
} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
if (used & CEPH_CAP_FILE_WR) {
- dout(10, "queue_cap_snap %p cap_snap %p snapc %p"
+ dout("queue_cap_snap %p cap_snap %p snapc %p"
" seq %llu used WR, now pending\n", inode,
capsnap, snapc, snapc->seq);
capsnap->writing = 1;
__ceph_finish_cap_snap(ci, capsnap);
}
} else {
- dout(10, "queue_cap_snap %p nothing dirty|writing\n", inode);
+ dout("queue_cap_snap %p nothing dirty|writing\n", inode);
kfree(capsnap);
}
capsnap->ctime = inode->i_ctime;
capsnap->time_warp_seq = ci->i_time_warp_seq;
if (capsnap->dirty_pages) {
- dout(10, "finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu "
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu "
"still has %d dirty pages\n", inode, capsnap,
capsnap->context, capsnap->context->seq,
capsnap->size, capsnap->dirty_pages);
return 0;
}
- dout(10, "finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n",
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n",
inode, capsnap, capsnap->context,
capsnap->context->seq, capsnap->size);
int invalidate = 0;
int err = -ENOMEM;
- dout(10, "update_snap_trace deletion=%d\n", deletion);
+ dout("update_snap_trace deletion=%d\n", deletion);
more:
ceph_decode_need(&p, e, sizeof(*ri), bad);
ri = p;
}
if (le64_to_cpu(ri->seq) > realm->seq) {
- dout(10, "update_snap_trace updating %llx %p %lld -> %lld\n",
+ dout("update_snap_trace updating %llx %p %lld -> %lld\n",
realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
/*
* if the realm seq has changed, queue a cap_snap for every
spin_lock(&realm->inodes_with_caps_lock);
}
spin_unlock(&realm->inodes_with_caps_lock);
- dout(20, "update_snap_trace cap_snaps queued\n");
+ dout("update_snap_trace cap_snaps queued\n");
}
} else {
- dout(10, "update_snap_trace %llx %p seq %lld unchanged\n",
+ dout("update_snap_trace %llx %p seq %lld unchanged\n",
realm->ino, realm, realm->seq);
}
invalidate = 1;
}
- dout(10, "done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
+ dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
realm, invalidate, p, e);
if (p < e)
struct inode *inode;
struct ceph_mds_session *session = NULL;
- dout(10, "flush_snaps\n");
+ dout("flush_snaps\n");
spin_lock(&mdsc->snap_flush_lock);
while (!list_empty(&mdsc->snap_flush_list)) {
ci = list_first_entry(&mdsc->snap_flush_list,
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
}
- dout(10, "flush_snaps done\n");
+ dout("flush_snaps done\n");
}
trace_len = le32_to_cpu(h->trace_len);
p += sizeof(*h);
- dout(10, "handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
+ dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
ceph_snap_op_name(op), split, trace_len);
/* find session */
session = __ceph_lookup_mds_session(mdsc, mds);
mutex_unlock(&mdsc->mutex);
if (!session) {
- dout(10, "WTF, got snap but no session for mds%d\n", mds);
+ dout("WTF, got snap but no session for mds%d\n", mds);
return;
}
}
ceph_get_snap_realm(mdsc, realm);
- dout(10, "splitting snap_realm %llx %p\n", realm->ino, realm);
+ dout("splitting snap_realm %llx %p\n", realm->ino, realm);
for (i = 0; i < num_split_inos; i++) {
struct ceph_vino vino = {
.ino = le64_to_cpu(split_inos[i]),
*/
if (ci->i_snap_realm->created >
le64_to_cpu(ri->created)) {
- dout(15, " leaving %p in newer realm %llx %p\n",
+ dout(" leaving %p in newer realm %llx %p\n",
inode, ci->i_snap_realm->ino,
ci->i_snap_realm);
goto skip_inode;
}
- dout(15, " will move %p to split realm %llx %p\n",
+ dout(" will move %p to split realm %llx %p\n",
inode, realm->ino, realm);
/*
* Remove the inode from the realm's inode
#include "ceph_debug.h"
#include "ceph_ver.h"
#include "decode.h"
-
-/*
- * global debug value.
- * 0 = quiet.
- *
- * if the per-file debug level >= 0, then that overrides this global
- * debug level.
- */
-int ceph_debug __read_mostly = 1;
-int ceph_debug_mask __read_mostly = 0xffffffff;
-/* if true, send output to KERN_INFO (console) instead of KERN_DEBUG. */
-int ceph_debug_console __read_mostly;
-int ceph_debug_super __read_mostly = -1; /* for this file */
-
-#define DOUT_MASK DOUT_MASK_SUPER
-#define DOUT_VAR ceph_debug_super
#include "super.h"
-
#include "mon_client.h"
void ceph_dispatch(void *p, struct ceph_msg *msg);
int rc;
int seconds = 15;
- dout(30, "put_super\n");
+ dout("put_super\n");
ceph_mdsc_close_sessions(&cl->mdsc);
ceph_monc_request_umount(&cl->monc);
__le64 fsid;
int err;
- dout(30, "statfs\n");
+ dout("statfs\n");
err = ceph_monc_do_statfs(&client->monc, &st);
if (err < 0)
return err;
static int ceph_syncfs(struct super_block *sb, int wait)
{
- dout(10, "sync_fs %d\n", wait);
+ dout("sync_fs %d\n", wait);
ceph_osdc_sync(&ceph_client(sb)->osdc);
ceph_mdsc_sync(&ceph_client(sb)->mdsc);
return 0;
struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
struct ceph_mount_args *args = &client->mount_args;
- if (ceph_debug != 0)
- seq_printf(m, ",debug=%d", ceph_debug);
if (args->flags & CEPH_OPT_FSID)
seq_printf(m, ",fsidmajor=%llu,fsidminor%llu",
__ceph_fsid_major(&args->fsid),
struct ceph_client *client = ceph_sb_to_client(sb);
#endif
- dout(30, "ceph_umount_begin\n");
+ dout("ceph_umount_begin\n");
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
if (!(flags & MNT_FORCE))
int err = -EINVAL;
if (client->signed_ticket) {
- dout(2, "handle_mount_ack - already mounted\n");
+ dout("handle_mount_ack - already mounted\n");
return 0;
}
- dout(2, "handle_mount_ack\n");
+ dout("handle_mount_ack\n");
p = msg->front.iov_base;
end = p + msg->front.iov_len;
p += len;
ceph_decode_32_safe(&p, end, len, bad);
- dout(10, "ticket len %d\n", len);
+ dout("ticket len %d\n", len);
ceph_decode_need(&p, end, len, bad);
client->signed_ticket = kmalloc(len, GFP_KERNEL);
client->whoami = le32_to_cpu(msg->hdr.dst.name.num);
client->msgr->inst.name = msg->hdr.dst.name;
- dout(1, "i am client%d, fsid is %llx.%llx\n", client->whoami,
- le64_to_cpu(__ceph_fsid_major(&client->monc.monmap->fsid)),
- le64_to_cpu(__ceph_fsid_minor(&client->monc.monmap->fsid)));
+ pr_info("ceph mount as client%d fsid is %llx.%llx\n", client->whoami,
+ le64_to_cpu(__ceph_fsid_major(&client->monc.monmap->fsid)),
+ le64_to_cpu(__ceph_fsid_minor(&client->monc.monmap->fsid)));
ceph_debugfs_client_init(client);
return 0;
{
struct ceph_client *client = p;
- dout(30, "ceph_peer_reset %s%d\n", ENTITY_NAME(*peer_name));
+ dout("ceph_peer_reset %s%d\n", ENTITY_NAME(*peer_name));
switch (le32_to_cpu(peer_name->type)) {
case CEPH_ENTITY_TYPE_MDS:
ceph_mdsc_handle_reset(&client->mdsc,
enum {
Opt_fsidmajor,
Opt_fsidminor,
- Opt_debug,
- Opt_debug_console,
- Opt_debug_msgr,
- Opt_debug_mdsc,
- Opt_debug_osdc,
- Opt_debug_addr,
- Opt_debug_inode,
- Opt_debug_snap,
- Opt_debug_ioctl,
- Opt_debug_caps,
Opt_monport,
Opt_port,
Opt_wsize,
static match_table_t arg_tokens = {
{Opt_fsidmajor, "fsidmajor=%ld"},
{Opt_fsidminor, "fsidminor=%ld"},
- {Opt_debug, "debug=%d"},
- {Opt_debug_msgr, "debug_msgr=%d"},
- {Opt_debug_mdsc, "debug_mdsc=%d"},
- {Opt_debug_osdc, "debug_osdc=%d"},
- {Opt_debug_addr, "debug_addr=%d"},
- {Opt_debug_inode, "debug_inode=%d"},
- {Opt_debug_snap, "debug_snap=%d"},
- {Opt_debug_ioctl, "debug_ioctl=%d"},
- {Opt_debug_caps, "debug_caps=%d"},
{Opt_monport, "monport=%d"},
{Opt_port, "port=%d"},
{Opt_wsize, "wsize=%d"},
{Opt_readdir_max_entries, "readdir_max_entries=%d"},
/* int args above */
{Opt_ip, "ip=%s"},
- {Opt_debug_console, "debug_console"},
{Opt_noshare, "noshare"},
{Opt_unsafewriteback, "unsafewriteback"},
{Opt_safewriteback, "safewriteback"},
unsigned ip = 0;
const char *p = c, *numstart;
- dout(15, "parse_ip on '%s' len %d\n", c, len);
+ dout("parse_ip on '%s' len %d\n", c, len);
for (mon_count = 0; mon_count < max_count; mon_count++) {
for (i = 0; !ADDR_DELIM(*p) && i < 4; i++) {
v = 0;
} else
addr[mon_count].ipaddr.sin_port = htons(CEPH_MON_PORT);
- dout(15, "parse_ip got %u.%u.%u.%u:%u\n",
+ dout("parse_ip got %u.%u.%u.%u:%u\n",
IPQUADPORT(addr[mon_count].ipaddr));
if (*p != ',')
substring_t argstr[MAX_OPT_ARGS];
int i;
- dout(15, "parse_mount_args dev_name '%s'\n", dev_name);
+ dout("parse_mount_args dev_name '%s'\n", dev_name);
memset(args, 0, sizeof(*args));
/* defaults */
while (*c == '/')
c++; /* remove leading '/'(s) */
*path = c;
- dout(15, "server path '%s'\n", *path);
+ dout("server path '%s'\n", *path);
/* parse mount options */
while ((c = strsep(&options, ",")) != NULL) {
"at '%s'\n", c);
continue;
}
- dout(30, "got token %d intval %d\n", token, intval);
+ dout("got token %d intval %d\n", token, intval);
}
switch (token) {
case Opt_fsidmajor:
args->flags |= CEPH_OPT_MYIP;
break;
- /* debug levels */
- case Opt_debug:
- ceph_debug = intval;
- break;
- case Opt_debug_msgr:
- ceph_debug_msgr = intval;
- break;
- case Opt_debug_mdsc:
- ceph_debug_mdsc = intval;
- break;
- case Opt_debug_osdc:
- ceph_debug_osdc = intval;
- break;
- case Opt_debug_addr:
- ceph_debug_addr = intval;
- break;
- case Opt_debug_inode:
- ceph_debug_inode = intval;
- break;
- case Opt_debug_snap:
- ceph_debug_snap = intval;
- break;
- case Opt_debug_ioctl:
- ceph_debug_ioctl = intval;
- break;
- case Opt_debug_caps:
- ceph_debug_caps = intval;
- break;
- case Opt_debug_console:
- ceph_debug_console = 1;
- break;
-
/* misc */
case Opt_wsize:
args->wsize = intval;
static void ceph_destroy_client(struct ceph_client *client)
{
- dout(10, "destroy_client %p\n", client);
+ dout("destroy_client %p\n", client);
/* unmount */
ceph_mdsc_stop(&client->mdsc);
if (client->msgr)
ceph_messenger_destroy(client->msgr);
kfree(client);
- dout(10, "destroy_client %p done\n", client);
+ dout("destroy_client %p done\n", client);
}
/*
struct dentry *root;
/* open dir */
- dout(30, "open_root_inode opening '%s'\n", path);
+ dout("open_root_inode opening '%s'\n", path);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
if (IS_ERR(req))
return ERR_PTR(PTR_ERR(req));
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err == 0) {
- dout(30, "open_root_inode success\n");
+ dout("open_root_inode success\n");
if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
client->sb->s_root == NULL)
root = d_alloc_root(req->r_target_inode);
root = d_alloc_anon(req->r_target_inode);
#endif
req->r_target_inode = NULL;
- dout(30, "open_root_inode success, root dentry is %p\n", root);
+ dout("open_root_inode success, root dentry is %p\n", root);
} else {
root = ERR_PTR(err);
}
unsigned char r;
struct ceph_client_mount *h;
- dout(10, "mount start\n");
+ dout("mount start\n");
mutex_lock(&client->mount_mutex);
/* initialize the messenger */
err = -EIO;
if (timeout && time_after_eq(jiffies, started + timeout))
goto out;
- dout(10, "mount sending mount request\n");
+ dout("mount sending mount request\n");
get_random_bytes(&r, 1);
which = r % client->mount_args.num_mon;
mount_msg = ceph_msg_new(CEPH_MSG_CLIENT_MOUNT, sizeof(*h), 0,
ceph_msg_send(client->msgr, mount_msg, 0);
/* wait */
- dout(10, "mount sent to mon%d, waiting for maps\n", which);
+ dout("mount sent to mon%d, waiting for maps\n", which);
err = wait_event_interruptible_timeout(client->mount_wq,
client->mount_err || have_all_maps(client),
request_interval);
}
- dout(30, "mount opening root\n");
+ dout("mount opening root\n");
root = open_root_dentry(client, "", started);
if (IS_ERR(root)) {
err = PTR_ERR(root);
if (path[0] == 0) {
dget(root);
} else {
- dout(30, "mount opening base mountpoint\n");
+ dout("mount opening base mountpoint\n");
root = open_root_dentry(client, path, started);
if (IS_ERR(root)) {
err = PTR_ERR(root);
mnt->mnt_sb = client->sb;
client->mount_state = CEPH_MOUNT_MOUNTED;
- dout(10, "mount success\n");
+ dout("mount success\n");
err = 0;
out:
struct ceph_client *client = data;
int ret;
- dout(10, "set_super %p data %p\n", s, data);
+ dout("set_super %p data %p\n", s, data);
s->s_flags = client->mount_args.sb_flags;
s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
struct ceph_mount_args *args = &new->mount_args;
struct ceph_client *other = ceph_sb_to_client(sb);
int i;
- dout(10, "ceph_compare_super %p\n", sb);
+ dout("ceph_compare_super %p\n", sb);
/* either compare fsid, or specified mon_hostname */
if (args->flags & CEPH_OPT_FSID) {
if (ceph_fsid_compare(&args->fsid, &other->fsid)) {
- dout(30, "fsid doesn't match\n");
+ dout("fsid doesn't match\n");
return 0;
}
} else {
&args->mon_addr[i]))
break;
if (i == args->num_mon) {
- dout(30, "mon ip not part of monmap\n");
+ dout("mon ip not part of monmap\n");
return 0;
}
- dout(10, "mon ip matches existing sb %p\n", sb);
+ dout("mon ip matches existing sb %p\n", sb);
}
if (args->sb_flags != other->mount_args.sb_flags) {
- dout(30, "flags differ\n");
+ dout("flags differ\n");
return 0;
}
return 1;
int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
const char *path;
- dout(25, "ceph_get_sb\n");
+ dout("ceph_get_sb\n");
/* create client (which we may/may not use) */
client = ceph_create_client();
if (ceph_client(sb) != client) {
ceph_destroy_client(client);
client = ceph_client(sb);
- dout(20, "get_sb got existing client %p\n", client);
+ dout("get_sb got existing client %p\n", client);
} else {
- dout(20, "get_sb using new client %p\n", client);
+ dout("get_sb using new client %p\n", client);
err = ceph_init_bdi(sb, client);
if (err < 0)
goto out_splat;
err = ceph_mount(client, mnt, path);
if (err < 0)
goto out_splat;
- dout(22, "root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
+ dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
mnt->mnt_root->d_inode, ceph_vinop(mnt->mnt_root->d_inode));
return 0;
out:
ceph_destroy_client(client);
out_final:
- dout(25, "ceph_get_sb fail %d\n", err);
+ dout("ceph_get_sb fail %d\n", err);
return err;
}
static void ceph_kill_sb(struct super_block *s)
{
struct ceph_client *client = ceph_sb_to_client(s);
- dout(1, "kill_sb %p\n", s);
+ dout("kill_sb %p\n", s);
ceph_mdsc_pre_umount(&client->mdsc);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
bdi_unregister(&client->backing_dev_info);
{
int ret = 0;
- dout(1, "init_ceph\n");
pr_info("ceph init (%s)\n", STRINGIFY(CEPH_GIT_VER));
ret = ceph_debugfs_init();
static void __exit exit_ceph(void)
{
- dout(1, "exit_ceph\n");
+ dout("exit_ceph\n");
unregister_filesystem(&ceph_fs_type);
ceph_caps_finalize();
destroy_caches();