diff --git a/[refs] b/[refs]
index 28eef07d9ad0..b33a42f8240b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 57b552ba0b2faf7cce66d476ef8ce7f6210c62fd
+refs/heads/master: 7a410e8d4d97457c8c381e2de9cdc7bd3306badc
diff --git a/trunk/drivers/base/memory.c b/trunk/drivers/base/memory.c
index db0848e54cc6..2f8691511190 100644
--- a/trunk/drivers/base/memory.c
+++ b/trunk/drivers/base/memory.c
@@ -429,16 +429,12 @@ static inline int memory_fail_init(void)
  * differentiation between which *physical* devices each
  * section belongs to...
  */
-int __weak arch_get_memory_phys_device(unsigned long start_pfn)
-{
-	return 0;
-}
 
 static int add_memory_block(int nid, struct mem_section *section,
-			unsigned long state, enum mem_add_context context)
+			unsigned long state, int phys_device,
+			enum mem_add_context context)
 {
 	struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	unsigned long start_pfn;
 	int ret = 0;
 
 	if (!mem)
@@ -447,8 +443,7 @@ static int add_memory_block(int nid, struct mem_section *section,
 	mem->phys_index = __section_nr(section);
 	mem->state = state;
 	mutex_init(&mem->state_mutex);
-	start_pfn = section_nr_to_pfn(mem->phys_index);
-	mem->phys_device = arch_get_memory_phys_device(start_pfn);
+	mem->phys_device = phys_device;
 
 	ret = register_memory(mem, section);
 	if (!ret)
@@ -520,7 +515,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
  */
 int register_new_memory(int nid, struct mem_section *section)
 {
-	return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG);
+	return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG);
 }
 
 int unregister_memory_section(struct mem_section *section)
@@ -553,7 +548,7 @@ int __init memory_dev_init(void)
 		if (!present_section_nr(i))
 			continue;
 		err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE,
-					BOOT);
+					0, BOOT);
 		if (!ret)
 			ret = err;
 	}
diff --git a/trunk/drivers/char/hpet.c b/trunk/drivers/char/hpet.c
index 9c5eea3ea4de..e481c5938bad 100644
--- a/trunk/drivers/char/hpet.c
+++ b/trunk/drivers/char/hpet.c
@@ -215,7 +215,9 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
 	else
 		v &= ~0xffff;
 
-	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
+	for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ;
+		irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) {
+
 		if (irq >= nr_irqs) {
 			irq = HPET_MAX_IRQ;
 			break;
diff --git a/trunk/drivers/pcmcia/vrc4171_card.c b/trunk/drivers/pcmcia/vrc4171_card.c
index c9fcbdc164ea..aaccdb9f4ba1 100644
--- a/trunk/drivers/pcmcia/vrc4171_card.c
+++ b/trunk/drivers/pcmcia/vrc4171_card.c
@@ -105,6 +105,7 @@ typedef struct vrc4171_socket {
 	char name[24];
 	int csc_irq;
 	int io_irq;
+	spinlock_t lock;
 } vrc4171_socket_t;
 
 static vrc4171_socket_t vrc4171_sockets[CARD_MAX_SLOTS];
@@ -327,7 +328,7 @@ static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
 	slot = sock->sock;
 	socket = &vrc4171_sockets[slot];
 
-	spin_lock_irq(&sock->lock);
+	spin_lock_irq(&socket->lock);
 
 	voltage = set_Vcc_value(state->Vcc);
 	exca_write_byte(slot, CARD_VOLTAGE_SELECT, voltage);
@@ -370,7 +371,7 @@ static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
 	cscint |= I365_CSC_DETECT;
 	exca_write_byte(slot, I365_CSCINT, cscint);
 
-	spin_unlock_irq(&sock->lock);
+	spin_unlock_irq(&socket->lock);
 
 	return 0;
 }
diff --git a/trunk/drivers/s390/char/sclp_cmd.c b/trunk/drivers/s390/char/sclp_cmd.c
index fc7ae05ce48a..b3beab610da4 100644
--- a/trunk/drivers/s390/char/sclp_cmd.c
+++ b/trunk/drivers/s390/char/sclp_cmd.c
@@ -704,13 +704,6 @@ int sclp_chp_deconfigure(struct chp_id chpid)
 	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
 }
 
-int arch_get_memory_phys_device(unsigned long start_pfn)
-{
-	if (!rzm)
-		return 0;
-	return PFN_PHYS(start_pfn) / rzm;
-}
-
 struct chp_info_sccb {
 	struct sccb_header header;
 	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
diff --git a/trunk/fs/jffs2/readinode.c b/trunk/fs/jffs2/readinode.c
index d32ee9412cb9..e22de8397b74 100644
--- a/trunk/fs/jffs2/readinode.c
+++ b/trunk/fs/jffs2/readinode.c
@@ -567,7 +567,7 @@ static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
 			else BUG();
 		}
 	}
-	*list = RB_ROOT;
+	list->rb_node = NULL;
 }
 
 static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
diff --git a/trunk/fs/ntfs/super.c b/trunk/fs/ntfs/super.c
index 0de1db6cddbf..1cf39dfaee7a 100644
--- a/trunk/fs/ntfs/super.c
+++ b/trunk/fs/ntfs/super.c
@@ -31,7 +31,6 @@
 #include 
 #include 
 #include 
-#include 
 
 #include "sysctl.h"
 #include "logfile.h"
@@ -2459,6 +2458,7 @@ static void ntfs_put_super(struct super_block *sb)
 static s64 get_nr_free_clusters(ntfs_volume *vol)
 {
 	s64 nr_free = vol->nr_clusters;
+	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index, max_index;
@@ -2477,8 +2477,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
 			max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned long *kaddr;
-
+		unsigned int i;
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2491,16 +2490,16 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
-		 * Subtract the number of set bits. If this
+		 * For each 4 bytes, subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		nr_free -= bitmap_weight(kaddr,
-					PAGE_CACHE_SIZE * BITS_PER_BYTE);
+		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}
@@ -2539,6 +2538,7 @@
 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		s64 nr_free, const pgoff_t max_index)
 {
+	u32 *kaddr;
 	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index;
@@ -2548,8 +2548,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
 			"0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned long *kaddr;
-
+		unsigned int i;
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2562,16 +2561,16 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
-		 * Subtract the number of set bits. If this
+		 * For each 4 bytes, subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		nr_free -= bitmap_weight(kaddr,
-					PAGE_CACHE_SIZE * BITS_PER_BYTE);
+		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}
diff --git a/trunk/include/linux/memory.h b/trunk/include/linux/memory.h
index 85582e1bcee9..1adfe779eb99 100644
--- a/trunk/include/linux/memory.h
+++ b/trunk/include/linux/memory.h
@@ -36,8 +36,6 @@ struct memory_block {
 	struct sys_device sysdev;
 };
 
-int arch_get_memory_phys_device(unsigned long start_pfn);
-
 /* These states are exposed to userspace as text strings in sysfs */
 #define MEM_ONLINE		(1<<0) /* exposed to userspace */
 #define MEM_GOING_OFFLINE	(1<<1) /* exposed to userspace */
diff --git a/trunk/mm/page_cgroup.c b/trunk/mm/page_cgroup.c
index 6c0081441a32..3dd88539a0e6 100644
--- a/trunk/mm/page_cgroup.c
+++ b/trunk/mm/page_cgroup.c
@@ -284,7 +284,6 @@ static DEFINE_MUTEX(swap_cgroup_mutex);
 struct swap_cgroup_ctrl {
 	struct page **map;
 	unsigned long length;
-	spinlock_t	lock;
 };
 
 struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
@@ -354,22 +353,16 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
 	struct swap_cgroup_ctrl *ctrl;
 	struct page *mappage;
 	struct swap_cgroup *sc;
-	unsigned long flags;
-	unsigned short retval;
 
 	ctrl = &swap_cgroup_ctrl[type];
 	mappage = ctrl->map[idx];
 	sc = page_address(mappage);
 	sc += pos;
-	spin_lock_irqsave(&ctrl->lock, flags);
-	retval = sc->id;
-	if (retval == old)
-		sc->id = new;
+	if (cmpxchg(&sc->id, old, new) == old)
+		return old;
 	else
-		retval = 0;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-	return retval;
+		return 0;
 }
 
 /**
@@ -390,17 +383,13 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
 	struct page *mappage;
 	struct swap_cgroup *sc;
 	unsigned short old;
-	unsigned long flags;
 
 	ctrl = &swap_cgroup_ctrl[type];
 	mappage = ctrl->map[idx];
 	sc = page_address(mappage);
 	sc += pos;
-	spin_lock_irqsave(&ctrl->lock, flags);
-	old = sc->id;
-	sc->id = id;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
+	old = xchg(&sc->id, id);
 
 	return old;
 }
@@ -452,7 +441,6 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
 	mutex_lock(&swap_cgroup_mutex);
 	ctrl->length = length;
 	ctrl->map = array;
-	spin_lock_init(&ctrl->lock);
 	if (swap_cgroup_prepare(type)) {
 		/* memory shortage */
 		ctrl->map = NULL;