---
r: 19388
b: refs/heads/master
c: 6ed5eb2
h: refs/heads/master
v: v3
Pekka Enberg authored and Linus Torvalds committed Feb 1, 2006
1 parent 1b8f646 commit 9e8a66d
Showing 2 changed files with 18 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5295a74cc0bcf1291686eb734ccb06baa3d55c1a
+refs/heads/master: 6ed5eb2211204224799b2821656bbbfde26ef200
22 changes: 17 additions & 5 deletions trunk/mm/slab.c
@@ -596,6 +596,18 @@ static inline struct slab *page_get_slab(struct page *page)
 	return (struct slab *)page->lru.prev;
 }
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_slab(page);
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
@@ -1437,7 +1449,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 		/* Print some data about the neighboring objects, if they
 		 * exist:
 		 */
-		struct slab *slabp = page_get_slab(virt_to_page(objp));
+		struct slab *slabp = virt_to_slab(objp);
 		int objnr;
 
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
@@ -2767,7 +2779,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 		void *objp = objpp[i];
 		struct slab *slabp;
 
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		check_spinlock_acquired_node(cachep, node);
@@ -2867,7 +2879,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
 	{
 		struct slab *slabp;
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
@@ -3130,7 +3142,7 @@ void kfree(const void *objp)
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
-	c = page_get_cache(virt_to_page(objp));
+	c = virt_to_cache(objp);
 	mutex_debug_check_no_locks_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
@@ -3704,5 +3716,5 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_size(page_get_cache(virt_to_page(objp)));
+	return obj_size(virt_to_cache(objp));
 }
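
For rough context, here is a small user-space sketch (not part of the commit, and not kernel code) of what the extracted helpers do: virt_to_cache() and virt_to_slab() wrap the page lookup that call sites such as kfree() and free_block() previously open-coded. The struct layouts, the demo_* globals, and the stub virt_to_page() below are invented stand-ins so the sketch compiles on its own.

#include <stdio.h>

/* Invented stand-ins for the kernel types; only the fields the helpers
 * touch are modeled here. */
struct kmem_cache { const char *name; };
struct slab { int nodeid; };

struct page {
	struct kmem_cache *cache;	/* the kernel keeps this in page->lru.next */
	struct slab *slab;		/* the kernel keeps this in page->lru.prev */
};

/* Stub: pretend every object lives on this one page. */
static struct kmem_cache demo_cache = { "demo-cache" };
static struct slab demo_slab = { 0 };
static struct page demo_page = { &demo_cache, &demo_slab };

static struct page *virt_to_page(const void *obj)
{
	(void)obj;
	return &demo_page;	/* the real kernel derives the page from the address */
}

static struct kmem_cache *page_get_cache(struct page *page) { return page->cache; }
static struct slab *page_get_slab(struct page *page) { return page->slab; }

/* The helpers introduced by this commit, same shape as in slab.c. */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

int main(void)
{
	int object = 42;
	/* Call sites now make one call instead of chaining two lookups. */
	printf("cache: %s, slab node: %d\n",
	       virt_to_cache(&object)->name, virt_to_slab(&object)->nodeid);
	return 0;
}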
