#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
-extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
+extern int mem_cgroup_charge_migrate_fixup(struct page *page,
+                               struct mm_struct *mm, gfp_t gfp_mask);
+/* for swap handling */
+extern int mem_cgroup_try_charge(struct mm_struct *mm,
+               gfp_t gfp_mask, struct mem_cgroup **ptr);
+extern void mem_cgroup_commit_charge_swapin(struct page *page,
+                                       struct mem_cgroup *ptr);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+
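The three swap-handling entries above are meant to be used as a set: mem_cgroup_try_charge() reserves the charge, and the caller must follow it with exactly one of the commit or cancel calls. A minimal sketch of the intended calling pattern; install_page() and the return values are placeholders standing in for the caller's real work, not part of this patch:

        struct mem_cgroup *ptr = NULL;

        if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr))
                return -ENOMEM;         /* nothing reserved, nothing to undo */

        if (install_page(page)) {       /* hypothetical step that may fail */
                /* failed after the reservation: give it back */
                mem_cgroup_cancel_charge_swapin(ptr);
                return -EFAULT;
        }
        /* success: turn the reservation into a charge on this page */
        mem_cgroup_commit_charge_swapin(page, ptr);
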
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);
 extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
 
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline int mem_cgroup_charge(struct page *page,
+struct mem_cgroup;
+
+static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
 {
        return 0;
 }
 
+static inline int mem_cgroup_charge_migrate_fixup(struct page *page,
+                                       struct mm_struct *mm, gfp_t gfp_mask)
+{
+       return 0;
+}
+
+static inline int mem_cgroup_try_charge(struct mm_struct *mm,
+                               gfp_t gfp_mask, struct mem_cgroup **ptr)
+{
+       return 0;
+}
+
+static inline void mem_cgroup_commit_charge_swapin(struct page *page,
+                                         struct mem_cgroup *ptr)
+{
+}
+
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+{
+}
+
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
 
        return nr_taken;
 }
 
-/*
- * Charge the memory controller for page usage.
- * Return
- * 0 if the charge was successful
- * < 0 if the cgroup is over its limit
+
+/**
+ * mem_cgroup_try_charge - get a charge of PAGE_SIZE.
+ * @mm: the mm_struct to charge against (used when *memcg is NULL).
+ * @gfp_mask: gfp_mask used for reclaim.
+ * @memcg: pointer to the memory cgroup to charge.
+ *
+ * Charge against the memory cgroup pointed to by *memcg. If *memcg is NULL,
+ * the memory cgroup is derived from @mm and stored in *memcg.
+ *
+ * Returns 0 on success and -ENOMEM on failure.
+ */
-static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
-                               gfp_t gfp_mask, enum charge_type ctype,
-                               struct mem_cgroup *memcg)
+int mem_cgroup_try_charge(struct mm_struct *mm,
+                       gfp_t gfp_mask, struct mem_cgroup **memcg)
 {
        struct mem_cgroup *mem;
-       struct page_cgroup *pc;
-       unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-       struct mem_cgroup_per_zone *mz;
-       unsigned long flags;
-
-       pc = lookup_page_cgroup(page);
-       /* can happen at boot */
-       if (unlikely(!pc))
-               return 0;
-       prefetchw(pc);
+       int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
         * thread group leader migrates. It's possible that mm is not
         * set, if so charge the init_mm (happens for pagecache usage).
         */
-
-       if (likely(!memcg)) {
+       if (likely(!*memcg)) {
                rcu_read_lock();
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (unlikely(!mem)) {
                 * For every charge from the cgroup, increment reference count
                 */
                css_get(&mem->css);
+               *memcg = mem;
                rcu_read_unlock();
        } else {
-               mem = memcg;
-               css_get(&memcg->css);
+               mem = *memcg;
+               css_get(&mem->css);
        }
 
        while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
                if (!(gfp_mask & __GFP_WAIT))
-                       goto out;
+                       goto nomem;
 
                if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
                        continue;
 
                if (!nr_retries--) {
                        mem_cgroup_out_of_memory(mem, gfp_mask);
-                       goto out;
+                       goto nomem;
                }
        }
+       return 0;
+nomem:
+       css_put(&mem->css);
+       return -ENOMEM;
+}
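As the kernel-doc above notes, @memcg is both an input and an output parameter. A short sketch of the two ways a caller can use it; known_memcg is illustrative and not part of this patch:

        struct mem_cgroup *memcg = NULL;

        /* Let try_charge() derive the group from mm->owner and return it. */
        if (!mem_cgroup_try_charge(mm, GFP_KERNEL, &memcg)) {
                /* memcg now points to the charged group, or may still be NULL */
        }

        /* Or charge a group the caller already holds a pointer to. */
        memcg = known_memcg;
        if (mem_cgroup_try_charge(mm, GFP_KERNEL, &memcg)) {
                /* -ENOMEM: the css reference taken by try_charge was dropped */
        }
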
+
+/*
+ * Commit a charge obtained with mem_cgroup_try_charge() and mark the
+ * page_cgroup USED. If the page_cgroup is already USED, drop the charge
+ * and return.
+ */
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+                                    struct page_cgroup *pc,
+                                    enum charge_type ctype)
+{
+       struct mem_cgroup_per_zone *mz;
+       unsigned long flags;
 
+       /* try_charge() may return with *memcg left NULL; handle that case. */
+       if (!mem)
+               return;
 
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
-
-               goto done;
+               return;
        }
        pc->mem_cgroup = mem;
        /*
        __mem_cgroup_add_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
        unlock_page_cgroup(pc);
+}
 
-done:
+/*
+ * Charge the memory controller for page usage.
+ * Return
+ * 0 if the charge was successful
+ * < 0 if the cgroup is over its limit
+ */
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+                               gfp_t gfp_mask, enum charge_type ctype,
+                               struct mem_cgroup *memcg)
+{
+       struct mem_cgroup *mem;
+       struct page_cgroup *pc;
+       int ret;
+
+       pc = lookup_page_cgroup(page);
+       /* can happen at boot */
+       if (unlikely(!pc))
+               return 0;
+       prefetchw(pc);
+
+       mem = memcg;
+       ret = mem_cgroup_try_charge(mm, gfp_mask, &mem);
+       if (ret)
+               return ret;
+
+       __mem_cgroup_commit_charge(mem, pc, ctype);
        return 0;
-out:
-       css_put(&mem->css);
-       return -ENOMEM;
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_newpage_charge(struct page *page,
+                             struct mm_struct *mm, gfp_t gfp_mask)
 {
        if (mem_cgroup_subsys.disabled)
                return 0;
                                MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+/*
+ * Currently the same as mem_cgroup_newpage_charge(), but the assumptions
+ * made about the page differ from the new-page case, so keep this special
+ * case in its own function for easier maintenance.
+ */
+int mem_cgroup_charge_migrate_fixup(struct page *page,
+                               struct mm_struct *mm, gfp_t gfp_mask)
+{
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
+       if (PageCompound(page))
+               return 0;
+
+       if (page_mapped(page) || (page->mapping && !PageAnon(page)))
+               return 0;
+
+       if (unlikely(!mm))
+               mm = &init_mm;
+
+       return mem_cgroup_charge_common(page, mm, gfp_mask,
+                               MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
+}
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
                                MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
+
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+       struct page_cgroup *pc;
+
+       if (mem_cgroup_subsys.disabled)
+               return;
+       if (!ptr)
+               return;
+       pc = lookup_page_cgroup(page);
+       __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
+void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
+{
+       if (mem_cgroup_subsys.disabled)
+               return;
+       if (!mem)
+               return;
+       res_counter_uncharge(&mem->res, PAGE_SIZE);
+       css_put(&mem->css);
+}
+
 /*
  * uncharge if !page_mapped(page)
  */
 
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
        /*
        struct page *page;
        swp_entry_t entry;
        pte_t pte;
+       struct mem_cgroup *ptr = NULL;
        int ret = 0;
 
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+       if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) {
                ret = VM_FAULT_OOM;
                unlock_page(page);
                goto out;
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
        page_add_anon_rmap(page, vma, address);
+       mem_cgroup_commit_charge_swapin(page, ptr);
 
        swap_free(entry);
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 out:
        return ret;
 out_nomap:
-       mem_cgroup_uncharge_page(page);
+       mem_cgroup_cancel_charge_swapin(ptr);
        pte_unmap_unlock(page_table, ptl);
        unlock_page(page);
        page_cache_release(page);
                goto oom;
        __SetPageUptodate(page);
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
-                       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
                                ret = VM_FAULT_OOM;
                                page_cache_release(page);
                                goto out;
 
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, swp_entry_t entry, struct page *page)
 {
+       struct mem_cgroup *ptr = NULL;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;
 
-       if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+       if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr))
                ret = -ENOMEM;
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
                if (ret > 0)
-                       mem_cgroup_uncharge_page(page);
+                       mem_cgroup_cancel_charge_swapin(ptr);
                ret = 0;
                goto out;
        }
        set_pte_at(vma->vm_mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        page_add_anon_rmap(page, vma, addr);
+       mem_cgroup_commit_charge_swapin(page, ptr);
        swap_free(entry);
        /*
         * Move the page to the active list so it is not