Commit 67e8024

---
r: 136952
b: refs/heads/master
c: f2a8205
h: refs/heads/master
v: v3
Tejun Heo committed Feb 20, 2009
1 parent 450ff34 commit 67e8024
Showing 3 changed files with 42 additions and 39 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 313e458f81ec3852106c5a83830fe0d4f405a71a
+refs/heads/master: f2a8205c4ef1af917d175c36a4097ae5587791c8
47 changes: 22 additions & 25 deletions trunk/include/linux/percpu.h
@@ -82,46 +82,43 @@ struct percpu_data {
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+	struct percpu_data *__p = __percpu_disguise(ptr); \
+	(__typeof__(ptr))__p->ptrs[(cpu)]; \
+})
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
+	/*
+	 * Can't easily make larger alignment work with kmalloc. WARN
+	 * on it. Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > __alignof__(unsigned long long));
 	return kzalloc(size, gfp);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size, align)	percpu_alloc_mask((size), GFP_KERNEL, \
-						  cpu_possible_map)
 #define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
 					       __alignof__(type))
-#define free_percpu(ptr)	percpu_free((ptr))
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define per_cpu_ptr(ptr, cpu) \
-({ \
-	struct percpu_data *__p = __percpu_disguise(ptr); \
-	(__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
 
 #endif /* __LINUX_PERCPU_H */
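
For orientation, here is a minimal sketch of how a caller uses the interface after this change. The module scaffolding and struct my_counter are hypothetical; alloc_percpu(), per_cpu_ptr(), get_cpu()/put_cpu(), and free_percpu() are the interfaces shown in the diff above.

	struct my_counter { unsigned long hits; };

	static struct my_counter *counters;	/* disguised percpu pointer */

	static int __init my_init(void)
	{
		/* one zeroed object per possible CPU */
		counters = alloc_percpu(struct my_counter);
		if (!counters)
			return -ENOMEM;

		/* non-atomic access to this CPU's copy: pin the CPU first */
		per_cpu_ptr(counters, get_cpu())->hits++;
		put_cpu();
		return 0;
	}

	static void __exit my_exit(void)
	{
		free_percpu(counters);
	}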
32 changes: 19 additions & 13 deletions trunk/mm/allocpercpu.c
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
 	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
 
 /**
- * percpu_alloc_mask - initial setup of per-cpu data
+ * alloc_percpu - initial setup of per-cpu data
  * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
+ * @align: alignment
  *
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
+ * Allocate dynamic percpu area. Percpu objects are populated with
+ * zeroed buffers.
  */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__alloc_percpu(size_t size, size_t align)
 {
 	/*
 	 * We allocate whole cache lines to avoid false sharing
 	 */
 	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-	void *pdata = kzalloc(sz, gfp);
+	void *pdata = kzalloc(sz, GFP_KERNEL);
 	void *__pdata = __percpu_disguise(pdata);
 
+	/*
+	 * Can't easily make larger alignment work with kmalloc. WARN
+	 * on it. Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > __alignof__(unsigned long long));
+
 	if (unlikely(!pdata))
 		return NULL;
-	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
+					   &cpu_possible_map)))
 		return __pdata;
 	kfree(pdata);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(__alloc_percpu);
 
 /**
- * percpu_free - final cleanup of per-cpu data
+ * free_percpu - final cleanup of per-cpu data
  * @__pdata: object to clean up
  *
  * We simply clean up any per-cpu object left. No need for the client to
  * track and specify through a bitmask which per-cpu objects are to free.
  */
-void percpu_free(void *__pdata)
+void free_percpu(void *__pdata)
 {
 	if (unlikely(!__pdata))
 		return;
 	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
 	kfree(__percpu_disguise(__pdata));
 }
-EXPORT_SYMBOL_GPL(percpu_free);
+EXPORT_SYMBOL_GPL(free_percpu);
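
Since percpu_alloc_mask(), percpu_alloc(), and percpu_free() are removed, call sites convert mechanically. A hypothetical caller (struct foo is illustrative) would change as follows; note that GFP_KERNEL and cpu_possible_map are now implied by the allocator itself rather than chosen by the caller.

	/* before: flags and CPU coverage chosen by the caller */
	ptr = percpu_alloc(sizeof(struct foo), GFP_KERNEL);
	/* ... use ptr ... */
	percpu_free(ptr);

	/* after: only size and alignment are passed; allocation is
	 * always GFP_KERNEL and covers cpu_possible_map */
	ptr = __alloc_percpu(sizeof(struct foo), __alignof__(struct foo));
	/* ... use ptr ... */
	free_percpu(ptr);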
