Merge branch 'slab/rcu' into slab/next
author Pekka Enberg <penberg@kernel.org>
Fri, 11 Mar 2011 16:10:45 +0000 (18:10 +0200)
committer Pekka Enberg <penberg@kernel.org>
Fri, 11 Mar 2011 16:10:45 +0000 (18:10 +0200)
Conflicts:
mm/slub.c

mm/slab.c
mm/slub.c

diff --cc mm/slab.c
Simple merge
diff --cc mm/slub.c
index ea6f0390996f00e3c17ecde274675c2b638b9936,ebba3eb193692cac5cdd3af0e472fa3c1800f80c..e841d8921c22680a013c019fd01dcebbe17de03c
--- a/mm/slub.c
+++ b/mm/slub.c
@@@ -281,35 -281,16 +281,40 @@@ static inline int slab_index(void *p, s
        return (p - addr) / s->size;
  }
  
 +static inline size_t slab_ksize(const struct kmem_cache *s)
 +{
 +#ifdef CONFIG_SLUB_DEBUG
 +      /*
 +       * Debugging requires use of the padding between object
 +       * and whatever may come after it.
 +       */
 +      if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 +              return s->objsize;
 +
 +#endif
 +      /*
 +       * If we have the need to store the freelist pointer
 +       * back there or track user information then we can
 +       * only use the space before that information.
 +       */
 +      if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
 +              return s->inuse;
 +      /*
 +       * Else we can use all the padding etc for the allocation
 +       */
 +      return s->size;
 +}
 +
+ static inline int order_objects(int order, unsigned long size, int reserved)
+ {
+       return ((PAGE_SIZE << order) - reserved) / size;
+ }
  static inline struct kmem_cache_order_objects oo_make(int order,
-                                               unsigned long size)
+               unsigned long size, int reserved)
  {
        struct kmem_cache_order_objects x = {
-               (order << OO_SHIFT) + (PAGE_SIZE << order) / size
+               (order << OO_SHIFT) + order_objects(order, size, reserved)
        };
  
        return x;