mm: page_alloc: use unsigned int for order in more places
X86 prefers the use of unsigned types for iterators and there is a
tendency to mix whether a signed or unsigned type is used for page order.
This converts a number of sites in mm/page_alloc.c to use unsigned int for
order where possible.
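
To see why the mixing is hazardous (an editor's illustration, not part of
the patch): under C's usual arithmetic conversions, a signed int compared
against an unsigned int is converted to unsigned, so a negative order
silently becomes a huge positive value.

	#include <stdio.h>

	int main(void)
	{
		int sorder = -1;		/* signed order, e.g. a sentinel */
		unsigned int uorder = 3;	/* unsigned order */

		/*
		 * The comparison is performed in unsigned arithmetic:
		 * -1 converts to UINT_MAX, so "sorder < uorder" is false
		 * even though -1 < 3 mathematically.
		 */
		if (sorder < uorder)
			printf("-1 < 3, as expected\n");
		else
			printf("surprise: -1 compared as %u\n",
					(unsigned int)sorder);
		return 0;
	}

gcc's -Wsign-compare flags exactly this pattern, which is one reason to
settle on a single type for page order.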

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Mel Gorman authored and Linus Torvalds committed Jun 4, 2014
1 parent cfc47a2 commit 7aeb09f
Showing 2 changed files with 27 additions and 24 deletions.
8 changes: 4 additions & 4 deletions include/linux/mmzone.h
@@ -817,10 +817,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
 extern struct mutex zonelists_mutex;
 void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		int classzone_idx, int alloc_flags);
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
-		int classzone_idx, int alloc_flags);
+bool zone_watermark_ok(struct zone *z, unsigned int order,
+		unsigned long mark, int classzone_idx, int alloc_flags);
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+		unsigned long mark, int classzone_idx, int alloc_flags);
 enum memmap_context {
 	MEMMAP_EARLY,
 	MEMMAP_HOTPLUG,
43 changes: 23 additions & 20 deletions mm/page_alloc.c
@@ -409,7 +409,8 @@ static int destroy_compound_page(struct page *page, unsigned long order)
 	return bad;
 }
 
-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
+static inline void prep_zero_page(struct page *page, unsigned int order,
+						gfp_t gfp_flags)
 {
 	int i;
 
@@ -453,7 +454,7 @@ static inline void set_page_guard_flag(struct page *page) { }
 static inline void clear_page_guard_flag(struct page *page) { }
 #endif
 
-static inline void set_page_order(struct page *page, int order)
+static inline void set_page_order(struct page *page, unsigned int order)
 {
 	set_page_private(page, order);
 	__SetPageBuddy(page);
@@ -504,7 +505,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
  * For recording page's order, we use page_private(page).
  */
 static inline int page_is_buddy(struct page *page, struct page *buddy,
-								int order)
+							unsigned int order)
 {
 	if (!pfn_valid_within(page_to_pfn(buddy)))
 		return 0;
@@ -726,7 +727,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 
 static void free_one_page(struct zone *zone,
 				struct page *page, unsigned long pfn,
-				int order,
+				unsigned int order,
 				int migratetype)
 {
 	spin_lock(&zone->lock);
@@ -897,7 +898,7 @@ static inline int check_new_page(struct page *page)
 	return 0;
 }
 
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
 {
 	int i;
 
@@ -1108,16 +1109,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
 
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 {
 	struct free_area *area;
-	int current_order;
+	unsigned int current_order;
 	struct page *page;
 	int migratetype, new_type, i;
 
 	/* Find the largest possible block of pages in the other list */
-	for (current_order = MAX_ORDER-1; current_order >= order;
-						--current_order) {
+	for (current_order = MAX_ORDER-1;
+				current_order >= order && current_order <= MAX_ORDER-1;
+				--current_order) {
 		for (i = 0;; i++) {
 			migratetype = fallbacks[start_migratetype][i];
 
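Why the loop gained an upper bound (illustrative sketch, not from the
kernel tree): once current_order is unsigned, decrementing it past zero
wraps to UINT_MAX, so "current_order >= order" alone can never terminate
the loop for an order-0 request. A standalone demo, with ORDER_MAX as a
hypothetical stand-in for MAX_ORDER:

	#include <stdio.h>

	#define ORDER_MAX 11	/* hypothetical stand-in for MAX_ORDER */

	int main(void)
	{
		unsigned int order = 0;		/* an order-0 request */
		unsigned int current_order;

		/*
		 * After current_order reaches 0, --current_order wraps to
		 * UINT_MAX; "current_order >= order" then stays true forever
		 * when order == 0. The "current_order <= ORDER_MAX - 1"
		 * bound detects the wrap and ends the loop.
		 */
		for (current_order = ORDER_MAX - 1;
		     current_order >= order && current_order <= ORDER_MAX - 1;
		     --current_order)
			printf("trying order %u\n", current_order);
		return 0;
	}

With a signed counter the old condition sufficed; the extra bound is the
price of the unsigned conversion.
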
@@ -1345,7 +1347,7 @@ void mark_free_pages(struct zone *zone)
 {
 	unsigned long pfn, max_zone_pfn;
 	unsigned long flags;
-	int order, t;
+	unsigned int order, t;
 	struct list_head *curr;
 
 	if (zone_is_empty(zone))
@@ -1541,8 +1543,8 @@ int split_free_page(struct page *page)
  */
 static inline
 struct page *buffered_rmqueue(struct zone *preferred_zone,
-			struct zone *zone, int order, gfp_t gfp_flags,
-			int migratetype)
+			struct zone *zone, unsigned int order,
+			gfp_t gfp_flags, int migratetype)
 {
 	unsigned long flags;
 	struct page *page;
@@ -1691,8 +1693,9 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  * Return true if free pages are above 'mark'. This takes into account the order
  * of the allocation.
  */
-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		int classzone_idx, int alloc_flags, long free_pages)
+static bool __zone_watermark_ok(struct zone *z, unsigned int order,
+			unsigned long mark, int classzone_idx, int alloc_flags,
+			long free_pages)
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark;
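
For readers following the watermark change, a simplified model of what
"takes into account the order" means (an assumption-laden sketch with
invented names like demo_zone, not the kernel's exact code): blocks smaller
than the request cannot satisfy it, so each pass discounts them from the
free count while halving the required reserve.

	#include <stdbool.h>
	#include <stdio.h>

	#define DEMO_MAX_ORDER 11	/* hypothetical stand-in for MAX_ORDER */

	struct demo_zone {
		/* free blocks available at each order */
		unsigned long nr_free[DEMO_MAX_ORDER];
	};

	static bool demo_watermark_ok(struct demo_zone *z, unsigned int order,
				      long free_pages, long mark)
	{
		long min = mark;
		unsigned int o;

		for (o = 0; o < order; o++) {
			/* pages locked up in too-small blocks don't help */
			free_pages -= z->nr_free[o] << o;
			/* demand less slack as we climb the orders */
			min >>= 1;
			if (free_pages <= min)
				return false;
		}
		return free_pages > min;
	}

	int main(void)
	{
		/* 32*1 + 8*2 + 4*4 = 64 free pages in this toy zone */
		struct demo_zone z = { .nr_free = { 32, 8, 4 } };

		printf("order-2 ok: %d\n", demo_watermark_ok(&z, 2, 64, 16));
		return 0;
	}

The real __zone_watermark_ok also folds in lowmem_reserve and the
ALLOC_HIGH/ALLOC_HARDER adjustments omitted here.
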
@@ -1726,15 +1729,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 	return true;
 }
 
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		int classzone_idx, int alloc_flags)
 {
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 					zone_page_state(z, NR_FREE_PAGES));
 }
 
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
-		int classzone_idx, int alloc_flags)
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+			unsigned long mark, int classzone_idx, int alloc_flags)
 {
 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
 
@@ -4121,7 +4124,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 
 static void __meminit zone_init_free_lists(struct zone *zone)
 {
-	int order, t;
+	unsigned int order, t;
 	for_each_migratetype_order(order, t) {
 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
 		zone->free_area[order].nr_free = 0;
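
For context, for_each_migratetype_order is a pair of ascending loops over
orders and migrate types (as defined in include/linux/mmzone.h of this era;
quoted from memory, so verify against the tree):

	#define for_each_migratetype_order(order, type) \
		for (order = 0; order < MAX_ORDER; order++) \
			for (type = 0; type < MIGRATE_TYPES; type++)

Because both counters only count up from zero, making order and t unsigned
is safe here without the wraparound guard that __rmqueue_fallback needed.
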
@@ -6444,7 +6447,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *page;
 	struct zone *zone;
-	int order, i;
+	unsigned int order, i;
 	unsigned long pfn;
 	unsigned long flags;
 	/* find the first valid pfn */
@@ -6496,7 +6499,7 @@ bool is_free_buddy_page(struct page *page)
 	struct zone *zone = page_zone(page);
 	unsigned long pfn = page_to_pfn(page);
 	unsigned long flags;
-	int order;
+	unsigned int order;
 
 	spin_lock_irqsave(&zone->lock, flags);
 	for (order = 0; order < MAX_ORDER; order++) {
