
Commit

Merge branch 'slub/lockless' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'slub/lockless' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (21 commits)
  slub: When allocating a new slab also prep the first object
  slub: disable interrupts in cmpxchg_double_slab when falling back to pagelock
  Avoid duplicate _count variables in page_struct
  Revert "SLUB: Fix build breakage in linux/mm_types.h"
  SLUB: Fix build breakage in linux/mm_types.h
  slub: slabinfo update for cmpxchg handling
  slub: Not necessary to check for empty slab on load_freelist
  slub: fast release on full slab
  slub: Add statistics for the case that the current slab does not match the node
  slub: Get rid of the another_slab label
  slub: Avoid disabling interrupts in free slowpath
  slub: Disable interrupts in free_debug processing
  slub: Invert locking and avoid slab lock
  slub: Rework allocator fastpaths
  slub: Pass kmem_cache struct to lock and freeze slab
  slub: explicit list_lock taking
  slub: Add cmpxchg_double_slab()
  mm: Rearrange struct page
  slub: Move page->frozen handling near where the page->freelist handling occurs
  slub: Do not use frozen page flag but a bit in the page counters
  ...
Linus Torvalds committed Jul 30, 2011
2 parents 1d3fe4a + 9e577e8 commit c11abbb
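
The series merged here replaces most of SLUB's per-slab locking with a single compare-and-exchange over two adjacent words of struct page: the freelist pointer and the packed counters (see "slub: Add cmpxchg_double_slab()" and the pagelock-fallback commit above). The snippet below is a minimal user-space sketch of that idea, not code from the merge: the type slab_words and the helper try_update_slab are invented for illustration, and whether the operation is actually lock-free depends on compiler and target (GCC may emit cmpxchg16b on x86-64 or route the call through libatomic, much as the kernel falls back to taking the slab lock).

#include <stdatomic.h>
#include <stdbool.h>

/* The two words SLUB updates as a unit: the first free object in the slab
 * and the packed counters (inuse/objects/frozen in the real struct page).
 * The pair is aligned to twice the word size so a double-word CAS can hit it. */
struct slab_words {
	void *freelist;
	unsigned long counters;
} __attribute__((aligned(2 * sizeof(unsigned long))));

/* Sketch of the cmpxchg_double_slab() idea: succeed only if *both* words
 * still hold the values the caller last read; otherwise another CPU changed
 * the slab and the caller must re-read and retry. */
static bool try_update_slab(_Atomic struct slab_words *slab,
			    struct slab_words expected,
			    struct slab_words desired)
{
	return atomic_compare_exchange_strong(slab, &expected, desired);
}

int main(void)
{
	static _Atomic struct slab_words slab;

	struct slab_words old = atomic_load(&slab);
	struct slab_words new = { .freelist = 0, .counters = old.counters + 1 };

	/* Callers loop on this until it succeeds -- the same retry pattern
	 * the reworked allocation and free paths use. */
	return try_update_slab(&slab, old, new) ? 0 : 1;
}
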
Showing 5 changed files with 616 additions and 304 deletions.
89 changes: 61 additions & 28 deletions include/linux/mm_types.h
@@ -30,51 +30,75 @@ struct address_space;
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
* who is mapping it.
*
* The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
* of struct page. That is currently only used by slub but the arrangement
* allows the use of atomic double word operations on the flags/mapping
* and lru list pointers also.
*/
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	struct address_space *mapping;	/* If low bit clear, points to
					 * inode address_space, or NULL.
					 * If page mapped as anonymous
					 * memory, low bit is set, and
					 * it points to anon_vma object:
					 * see PAGE_MAPPING_ANON below.
					 */
	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* slub first free object */
		};

		union {
			/* Used for cmpxchg_double in slub */
			unsigned long counters;

			struct {
				union {
					atomic_t _mapcount;	/* Count of ptes mapped in mms,
								 * to show when page is mapped
								 * & limit reverse map searches.
								 */
					struct {
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
				};
				atomic_t _count;	/* Usage count, see below. */
			};
		};
	};

	/* Third double word block */
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};
/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
@@ -100,7 +124,16 @@ struct page {
*/
void *shadow;
#endif
}
/*
* If another subsystem starts using the double word pairing for atomic
* operations on struct page then it must change the #if to ensure
* proper alignment of the page struct.
*/
#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
__attribute__((__aligned__(2*sizeof(unsigned long))))
#endif
;

typedef unsigned long __nocast vm_flags_t;

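The second double word block above is also why the PG_slub_frozen page flag can be deleted in the next file: inuse, objects and frozen are bit-fields that alias one unsigned long, counters, so slab state changes and freelist updates can be committed in a single atomic operation. Below is a stand-alone sketch of that overlay; the wrapper type page_counters is made up for this example, and the _mapcount/_count members that share the word in the real struct are left out for brevity.

#include <stdio.h>

/* Simplified model of the counters overlay in the rearranged struct page:
 * the same storage is either one word (handed to the double-word cmpxchg)
 * or the individual slab bookkeeping fields.  Field names follow the patch. */
union page_counters {
	unsigned long counters;
	struct {
		unsigned inuse:16;	/* objects handed out from this slab */
		unsigned objects:15;	/* total objects in this slab        */
		unsigned frozen:1;	/* slab is an active per-cpu slab    */
	};
};

int main(void)
{
	union page_counters c = { .counters = 0 };

	c.inuse = 3;
	c.objects = 32;
	c.frozen = 1;

	/* The whole state is now a single word that can be compared and
	 * swapped together with the freelist pointer. */
	printf("counters = %#lx\n", c.counters);
	return 0;
}

On a little-endian 64-bit build the three bit-fields of this sketch land in the low 32 bits of counters, roughly mirroring how the kernel packs them next to _count.
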
5 changes: 0 additions & 5 deletions include/linux/page-flags.h
@@ -124,9 +124,6 @@ enum pageflags {

/* SLOB */
PG_slob_free = PG_private,

/* SLUB */
PG_slub_frozen = PG_active,
};

#ifndef __GENERATING_BOUNDS_H
@@ -212,8 +209,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)

__PAGEFLAG(SlubFrozen, slub_frozen)

/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
3 changes: 3 additions & 0 deletions include/linux/slub_def.h
@@ -24,15 +24,18 @@ enum stat_item {
ALLOC_FROM_PARTIAL, /* Cpu slab acquired from partial list */
ALLOC_SLAB, /* Cpu slab acquired from page allocator */
ALLOC_REFILL, /* Refill cpu slab from slab freelist */
ALLOC_NODE_MISMATCH, /* Switching cpu slab */
FREE_SLAB, /* Slab freed to the page allocator */
CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
DEACTIVATE_BYPASS, /* Implicit deactivation */
ORDER_FALLBACK, /* Number of times fallback was necessary */
CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
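The stat_item entries above instrument the reworked paths: node mismatches when choosing a cpu slab, implicit deactivations, and cmpxchg-double attempts that lost a race. With CONFIG_SLUB_STATS enabled the kernel keeps such counters per cpu in kmem_cache_cpu and exposes them through sysfs under /sys/kernel/slab/, which is what the updated slabinfo tool reads. The toy model below only shows the bookkeeping pattern; the global array and the stat_inc() helper are inventions of this sketch, not the kernel's per-cpu implementation.

#include <stdio.h>

/* Stat items touched by this merge (names as in the patch). */
enum stat_item {
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab                     */
	DEACTIVATE_BYPASS,	/* Implicit deactivation                  */
	CMPXCHG_DOUBLE_CPU_FAIL,/* this_cpu_cmpxchg_double lost a race    */
	CMPXCHG_DOUBLE_FAIL,	/* cmpxchg double did not match           */
	NR_STAT_ITEMS
};

/* Toy global counters; the kernel keeps one array per cpu and bumps it
 * with a per-cpu increment. */
static unsigned long stats[NR_STAT_ITEMS];

static void stat_inc(enum stat_item item)
{
	stats[item]++;
}

int main(void)
{
	stat_inc(CMPXCHG_DOUBLE_FAIL);	/* e.g. a racing free changed the slab */
	printf("cmpxchg_double_fail = %lu\n", stats[CMPXCHG_DOUBLE_FAIL]);
	return 0;
}
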