snapc, snapc->seq, snapc->num_snaps);
} else {
struct list_head *p;
- struct ceph_cap_snap *capsnap = 0;
+ struct ceph_cap_snap *capsnap = NULL;
list_for_each(p, &ci->i_cap_snaps) {
capsnap = list_entry(p, struct ceph_cap_snap,
ci_item);
static int ceph_set_page_dirty_vfs(struct page *page)
{
- return ceph_set_page_dirty(page, 0);
+ return ceph_set_page_dirty(page, NULL);
}
static void ceph_invalidatepage(struct page *page, unsigned long offset)
static int ceph_releasepage(struct page *page, gfp_t g)
{
- struct inode *inode = page->mapping ? page->mapping->host:0;
+ struct inode *inode = page->mapping ? page->mapping->host : NULL;
dout(20, "%p releasepage %p idx %lu\n", inode, page, page->index);
WARN_ON(PageDirty(page));
WARN_ON(page->private);
static struct ceph_snap_context *__get_oldest_context(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_snap_context *snapc = 0;
+ struct ceph_snap_context *snapc = NULL;
struct list_head *p;
- struct ceph_cap_snap *capsnap = 0;
+ struct ceph_cap_snap *capsnap = NULL;
list_for_each(p, &ci->i_cap_snaps) {
capsnap = list_entry(p, struct ceph_cap_snap, ci_item);
static struct ceph_snap_context *get_oldest_context(struct inode *inode)
{
- struct ceph_snap_context *snapc = 0;
+ struct ceph_snap_context *snapc = NULL;
spin_lock(&inode->i_lock);
snapc = __get_oldest_context(inode);
spin_unlock(&inode->i_lock);
/* verify this is a writeable snap context */
snapc = (void *)page->private;
- if (snapc == 0) {
+ if (snapc == NULL) {
dout(20, "writepage %p page %p not dirty?\n", inode, page);
goto out;
}
* lame release_pages helper. release_pages() isn't exported to
* modules.
*/
-void ceph_release_pages(struct page **pages, int num)
+static void ceph_release_pages(struct page **pages, int num)
{
struct pagevec pvec;
int i;
pgoff_t index, start, end;
int range_whole = 0;
int should_loop = 1;
- struct page **pages = 0;
+ struct page **pages = NULL;
pgoff_t max_pages = 0, max_pages_ever = 0;
- struct ceph_snap_context *snapc = 0, *last_snapc = 0;
+ struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
struct pagevec pvec;
int done = 0;
int rc = 0;
u64 offset, len;
struct ceph_osd_request *req;
- req = 0;
+ req = NULL;
next = 0;
locked_pages = 0;
max_pages = max_pages_ever;
release_pages:
dout(50, "pagevec_release on %d pages (%p)\n", (int)pvec.nr,
- pvec.nr ? pvec.pages[0] : 0);
+ pvec.nr ? pvec.pages[0] : NULL);
pagevec_release(&pvec);
if (locked_pages && !done)
page, snapc);
if (!clear_page_dirty_for_io(page))
goto retry_locked;
- r = writepage_nounlock(page, 0);
+ r = writepage_nounlock(page, NULL);
if (r < 0)
goto fail_nosnap;
goto retry_locked;
#define IPSZ (sizeof(struct ceph_inopath_item) / sizeof(u32))
-int ceph_encode_fh(struct dentry *dentry, __u32 *rawfh, int *max_len,
+static int ceph_encode_fh(struct dentry *dentry, __u32 *rawfh, int *max_len,
int connectable)
{
int type = 1;
if (IS_ERR(path))
return ERR_PTR(PTR_ERR(path));
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_OPEN, pathbase, path,
- 0, 0,
+ 0, NULL,
dentry, want_auth);
kfree(path);
req->r_expected_cap = kmalloc(sizeof(struct ceph_cap), GFP_NOFS);
/* can we re-use existing caps? */
spin_lock(&inode->i_lock);
- if ((__ceph_caps_issued(ci, 0) & wantcaps) == wantcaps) {
+ if ((__ceph_caps_issued(ci, NULL) & wantcaps) == wantcaps) {
dout(10, "open fmode %d caps %d using existing on %p\n",
fmode, wantcaps, inode);
__ceph_get_fmode(ci, fmode);
dentry = d_find_alias(inode);
if (!dentry)
return -ESTALE; /* blech */
- ceph_mdsc_lease_release(mdsc, inode, 0, CEPH_LOCK_ICONTENT);
+ ceph_mdsc_lease_release(mdsc, inode, NULL, CEPH_LOCK_ICONTENT);
req = prepare_open_request(inode->i_sb, dentry, flags, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
if (IS_ERR(req))
return ERR_PTR(PTR_ERR(req));
if (flags & O_CREAT)
- ceph_mdsc_lease_release(mdsc, dir, 0, CEPH_LOCK_ICONTENT);
+ ceph_mdsc_lease_release(mdsc, dir, NULL, CEPH_LOCK_ICONTENT);
dget(dentry); /* to match put_request below */
req->r_last_dentry = dentry; /* use this dentry in fill_trace */
req->r_locked_dir = dir; /* caller holds dir->i_mutex */
* atomically grab references, so that those bits are not released
* mid-read.
*/
-ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
+static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
/*
*/
-ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
+static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
if (IS_ERR(path))
return PTR_ERR(path);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LSETLAYOUT,
- pathbase, path, 0, 0,
+ pathbase, path, 0, NULL,
file->f_dentry, USE_ANY_MDS);
kfree(path);
reqh = req->r_request->front.iov_base;
reqh->args.setlayout.layout = layout;
- ceph_mdsc_lease_release(mdsc, inode, 0, CEPH_LOCK_ICONTENT);
+ ceph_mdsc_lease_release(mdsc, inode, NULL, CEPH_LOCK_ICONTENT);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
return err;