From 5ef60384803d2de22452e9ee5d531a99c9225002 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Thu, 20 Dec 2007 01:29:45 -0800
Subject: [PATCH]

--- yaml ---
r: 75119
b: refs/heads/master
c: 36bb61346d9e64b55285f27363e93a6e96f2abba
h: refs/heads/master
i:
  75117: f0b4081c03e1282b3dfc625fc4dd4695ab951cc9
  75115: d8b080645c1517a3b49ce43b908705fd4e8e7039
  75111: ad2ad9674c15dd1392306d49c5c04e20952f6eb7
  75103: 7ad599af2a5622b3806d15ed9701c25744ddbc8f
v: v3
---
 [refs]                                      |  2 +-
 trunk/Makefile                              |  2 +-
 trunk/arch/x86/kernel/cpu/intel_cacheinfo.c |  1 -
 trunk/arch/x86/kernel/traps_32.c            |  7 +++--
 trunk/drivers/md/Kconfig                    |  2 +-
 trunk/drivers/md/dm-crypt.c                 | 31 +++++++++++----------
 trunk/drivers/md/dm-ioctl.c                 | 12 ++++----
 trunk/drivers/md/dm-table.c                 | 16 +----------
 trunk/drivers/md/dm.c                       | 31 +++++----------------
 trunk/drivers/md/dm.h                       |  7 -----
 trunk/fs/binfmt_aout.c                      |  1 +
 trunk/fs/xfs/linux-2.6/xfs_file.c           |  1 -
 trunk/fs/xfs/linux-2.6/xfs_iops.c           |  4 ++-
 trunk/include/asm-sparc64/dma-mapping.h     | 12 ++------
 trunk/include/linux/device-mapper.h         |  1 -
 trunk/kernel/panic.c                        | 18 ------------
 trunk/kernel/sched_rt.c                     |  2 --
 trunk/mm/slub.c                             |  4 +--
 18 files changed, 47 insertions(+), 107 deletions(-)

diff --git a/[refs] b/[refs]
index 8cc752625019..09af732a2cbc 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 76be895001f2b0bee42a7685e942d3e08d5dd46c
+refs/heads/master: 36bb61346d9e64b55285f27363e93a6e96f2abba
diff --git a/trunk/Makefile b/trunk/Makefile
index fbb8dfc063d3..c1825aab77e8 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 24
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc5
 NAME = Arr Matey! A Hairy Bilge Rat!
 
 # *DOCUMENTATION*
diff --git a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9f530ff43c21..606fe4d55a91 100644
--- a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -49,7 +49,6 @@ static struct _cache_table cache_table[] __cpuinitdata =
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
-	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
diff --git a/trunk/arch/x86/kernel/traps_32.c b/trunk/arch/x86/kernel/traps_32.c
index c88bbffcaa03..ef6010262597 100644
--- a/trunk/arch/x86/kernel/traps_32.c
+++ b/trunk/arch/x86/kernel/traps_32.c
@@ -373,13 +373,14 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
-		raw_local_irq_save(flags);
 		__raw_spin_lock(&die.lock);
+		raw_local_save_flags(flags);
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
-	} else
-		raw_local_irq_save(flags);
+	}
+	else
+		raw_local_save_flags(flags);
 
 	if (++die.lock_owner_depth < 3) {
 		unsigned long esp;
diff --git a/trunk/drivers/md/Kconfig b/trunk/drivers/md/Kconfig
index 3fa7c77d9bd9..9b6fbf044fd8 100644
--- a/trunk/drivers/md/Kconfig
+++ b/trunk/drivers/md/Kconfig
@@ -269,7 +269,7 @@ config DM_MULTIPATH_RDAC
 
 config DM_MULTIPATH_HP
 	tristate "HP MSA multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
+	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
 	---help---
 	  Multipath support for HP MSA (Active/Passive) series hardware.
 
diff --git a/trunk/drivers/md/dm-crypt.c b/trunk/drivers/md/dm-crypt.c
index 6b66ee46b87d..28c6ae095c56 100644
--- a/trunk/drivers/md/dm-crypt.c
+++ b/trunk/drivers/md/dm-crypt.c
@@ -398,8 +398,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned i, len;
-	struct page *page;
+	unsigned int i;
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
@@ -408,8 +407,10 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	clone_init(io, clone);
 
 	for (i = 0; i < nr_iovecs; i++) {
-		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		struct bio_vec *bv = bio_iovec_idx(clone, i);
+
+		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
+		if (!bv->bv_page)
 			break;
 
 		/*
@@ -420,14 +421,15 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
-		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
-
-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
-			break;
-		}
+		bv->bv_offset = 0;
+		if (size > PAGE_SIZE)
+			bv->bv_len = PAGE_SIZE;
+		else
+			bv->bv_len = size;
 
-		size -= len;
+		clone->bi_size += bv->bv_len;
+		clone->bi_vcnt++;
+		size -= bv->bv_len;
 	}
 
 	if (!clone->bi_size) {
@@ -509,9 +511,6 @@ static void crypt_endio(struct bio *clone, int error)
 	struct crypt_config *cc = io->target->private;
 	unsigned read_io = bio_data_dir(clone) == READ;
 
-	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
-		error = -EIO;
-
 	/*
 	 * free the processed pages
 	 */
@@ -520,8 +519,10 @@ static void crypt_endio(struct bio *clone, int error)
 		goto out;
 	}
 
-	if (unlikely(error))
+	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
+		error = -EIO;
 		goto out;
+	}
 
 	bio_put(clone);
 	kcryptd_queue_crypt(io);
diff --git a/trunk/drivers/md/dm-ioctl.c b/trunk/drivers/md/dm-ioctl.c
index 9627fa0f9470..138200bf5e0b 100644
--- a/trunk/drivers/md/dm-ioctl.c
+++ b/trunk/drivers/md/dm-ioctl.c
@@ -332,8 +332,6 @@ static int dm_hash_rename(const char *old, const char *new)
 		dm_table_put(table);
 	}
 
-	dm_kobject_uevent(hc->md);
-
 	dm_put(hc->md);
 	up_write(&_hash_lock);
 	kfree(old_name);
@@ -1252,17 +1250,21 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	if (!table)
 		goto out_argv;
 
-	ti = dm_table_find_target(table, tmsg->sector);
-	if (!dm_target_is_valid(ti)) {
+	if (tmsg->sector >= dm_table_get_size(table)) {
 		DMWARN("Target message sector outside device.");
 		r = -EINVAL;
-	} else if (ti->type->message)
+		goto out_table;
+	}
+
+	ti = dm_table_find_target(table, tmsg->sector);
+	if (ti->type->message)
 		r = ti->type->message(ti, argc, argv);
 	else {
 		DMWARN("Target type does not support messages");
 		r = -EINVAL;
 	}
 
+ out_table:
 	dm_table_put(table);
 
  out_argv:
 	kfree(argv);
diff --git a/trunk/drivers/md/dm-table.c b/trunk/drivers/md/dm-table.c
index 47818d8249cb..e298d8d11f24 100644
--- a/trunk/drivers/md/dm-table.c
+++ b/trunk/drivers/md/dm-table.c
@@ -99,9 +99,6 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
 
-	lhs->max_hw_sectors =
-		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
-
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
@@ -192,10 +189,8 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 
 	/*
 	 * Allocate both the target array and offset array at once.
-	 * Append an empty entry to catch sectors beyond the end of
-	 * the device.
 	 */
-	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
+	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
 					  sizeof(sector_t));
 	if (!n_highs)
 		return -ENOMEM;
@@ -569,9 +564,6 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_segment_size =
 		min_not_zero(rs->max_segment_size, q->max_segment_size);
 
-	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
-
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask, q->seg_boundary_mask);
 
@@ -709,8 +701,6 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 {
 	if (!rs->max_sectors)
 		rs->max_sectors = SAFE_MAX_SECTORS;
-	if (!rs->max_hw_sectors)
-		rs->max_hw_sectors = SAFE_MAX_SECTORS;
 	if (!rs->max_phys_segments)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
@@ -877,9 +867,6 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
- *
- * Caller should check returned pointer with dm_target_is_valid()
- * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
@@ -909,7 +896,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->max_hw_segments = t->limits.max_hw_segments;
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c
index 88c0fd657825..07cbbb8eb3e0 100644
--- a/trunk/drivers/md/dm.c
+++ b/trunk/drivers/md/dm.c
@@ -672,19 +672,13 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	return clone;
 }
 
-static int __clone_and_map(struct clone_info *ci)
+static void __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
-	struct dm_target *ti;
-	sector_t len = 0, max;
+	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
+	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
 	struct dm_target_io *tio;
 
-	ti = dm_table_find_target(ci->map, ci->sector);
-	if (!dm_target_is_valid(ti))
-		return -EIO;
-
-	max = max_io_len(ci->md, ci->sector, ti);
-
 	/*
 	 * Allocate a target io object.
 	 */
@@ -742,9 +736,6 @@ static int __clone_and_map(struct clone_info *ci)
 		do {
 			if (offset) {
 				ti = dm_table_find_target(ci->map, ci->sector);
-				if (!dm_target_is_valid(ti))
-					return -EIO;
-
 				max = max_io_len(ci->md, ci->sector, ti);
 
 				tio = alloc_tio(ci->md);
@@ -768,8 +759,6 @@ static int __clone_and_map(struct clone_info *ci)
 
 		ci->idx++;
 	}
-
-	return 0;
 }
 
 /*
@@ -778,7 +767,6 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
 	struct clone_info ci;
-	int error = 0;
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
 		return -EIO;
@@ -796,11 +784,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.idx = bio->bi_idx;
 
 	start_io_acct(ci.io);
-	while (ci.sector_count && !error)
-		error = __clone_and_map(&ci);
+	while (ci.sector_count)
+		__clone_and_map(&ci);
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, error);
+	dec_pending(ci.io, 0);
 	dm_table_put(ci.map);
 
 	return 0;
@@ -1514,7 +1502,7 @@ int dm_resume(struct mapped_device *md)
 
 	dm_table_unplug_all(map);
 
-	dm_kobject_uevent(md);
+	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
 
 	r = 0;
 
@@ -1528,11 +1516,6 @@ int dm_resume(struct mapped_device *md)
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md)
-{
-	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
-}
-
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
 {
 	return atomic_add_return(1, &md->uevent_seq);
diff --git a/trunk/drivers/md/dm.h b/trunk/drivers/md/dm.h
index b4584a39383b..4b3faa45277e 100644
--- a/trunk/drivers/md/dm.h
+++ b/trunk/drivers/md/dm.h
@@ -112,11 +112,6 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
 
-/*
- * To check the return value from dm_table_find_target().
- */
-#define dm_target_is_valid(t) ((t)->table)
-
 /*-----------------------------------------------------------------
  * A registry of target types.
  *---------------------------------------------------------------*/
@@ -187,6 +182,4 @@ union map_info *dm_get_mapinfo(struct bio *bio);
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
 
-void dm_kobject_uevent(struct mapped_device *md);
-
 #endif
diff --git a/trunk/fs/binfmt_aout.c b/trunk/fs/binfmt_aout.c
index 7596e1e94cde..e176d195e7e5 100644
--- a/trunk/fs/binfmt_aout.c
+++ b/trunk/fs/binfmt_aout.c
@@ -319,6 +319,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	current->mm->free_area_cache = current->mm->mmap_base;
 	current->mm->cached_hole_size = 0;
 
+	current->mm->mmap = NULL;
 	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 #ifdef __sparc__
diff --git a/trunk/fs/xfs/linux-2.6/xfs_file.c b/trunk/fs/xfs/linux-2.6/xfs_file.c
index 4847eb83fc18..e1fcef2eb928 100644
--- a/trunk/fs/xfs/linux-2.6/xfs_file.c
+++ b/trunk/fs/xfs/linux-2.6/xfs_file.c
@@ -347,7 +347,6 @@ xfs_file_readdir(
 
 		size = buf.used;
 		de = (struct hack_dirent *)buf.dirent;
-		curr_offset = de->offset /* & 0x7fffffff */;
 		while (size > 0) {
 			if (filldir(dirent, de->name, de->namlen,
 				    curr_offset & 0x7fffffff,
diff --git a/trunk/fs/xfs/linux-2.6/xfs_iops.c b/trunk/fs/xfs/linux-2.6/xfs_iops.c
index 5e8bb7f71b5a..37e116779eb1 100644
--- a/trunk/fs/xfs/linux-2.6/xfs_iops.c
+++ b/trunk/fs/xfs/linux-2.6/xfs_iops.c
@@ -332,7 +332,9 @@ xfs_vn_mknod(
 	ASSERT(vp);
 	ip = vn_to_inode(vp);
 
-	if (S_ISDIR(mode))
+	if (S_ISCHR(mode) || S_ISBLK(mode))
+		ip->i_rdev = rdev;
+	else if (S_ISDIR(mode))
 		xfs_validate_fields(ip);
 	d_instantiate(dentry, ip);
 	xfs_validate_fields(dir);
diff --git a/trunk/include/asm-sparc64/dma-mapping.h b/trunk/include/asm-sparc64/dma-mapping.h
index 1fc655452b81..38cbec76a33f 100644
--- a/trunk/include/asm-sparc64/dma-mapping.h
+++ b/trunk/include/asm-sparc64/dma-mapping.h
@@ -25,15 +25,9 @@ struct dma_ops {
 	void (*sync_single_for_cpu)(struct device *dev,
 				    dma_addr_t dma_handle, size_t size,
 				    enum dma_data_direction direction);
-	void (*sync_single_for_device)(struct device *dev,
-				       dma_addr_t dma_handle, size_t size,
-				       enum dma_data_direction direction);
 	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
 				int nelems,
 				enum dma_data_direction direction);
-	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
-				   int nelems,
-				   enum dma_data_direction direction);
 };
 extern const struct dma_ops *dma_ops;
 
@@ -105,7 +99,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
 					      size_t size,
 					      enum dma_data_direction direction)
 {
-	dma_ops->sync_single_for_device(dev, dma_handle, size, direction);
+	/* No flushing needed to sync cpu writes to the device. */
 }
 
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -123,7 +117,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 						    size_t size,
 						    enum dma_data_direction direction)
 {
-	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
+	/* No flushing needed to sync cpu writes to the device. */
 }
 
 
@@ -138,7 +132,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 					  struct scatterlist *sg, int nelems,
 					  enum dma_data_direction direction)
 {
-	dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+	/* No flushing needed to sync cpu writes to the device. */
 }
 
 static inline int dma_mapping_error(dma_addr_t dma_addr)
diff --git a/trunk/include/linux/device-mapper.h b/trunk/include/linux/device-mapper.h
index e765e191663d..b8b7c51389fe 100644
--- a/trunk/include/linux/device-mapper.h
+++ b/trunk/include/linux/device-mapper.h
@@ -115,7 +115,6 @@ struct io_restrictions {
 	unsigned short max_hw_segments;
 	unsigned short hardsect_size;
 	unsigned int max_segment_size;
-	unsigned int max_hw_sectors;
 	unsigned long seg_boundary_mask;
 	unsigned long bounce_pfn;
 	unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/trunk/kernel/panic.c b/trunk/kernel/panic.c
index da4d6bac270e..6f6e03e91595 100644
--- a/trunk/kernel/panic.c
+++ b/trunk/kernel/panic.c
@@ -19,7 +19,6 @@
 #include <linux/nmi.h>
 #include <linux/kexec.h>
 #include <linux/debug_locks.h>
-#include <linux/random.h>
 
 int panic_on_oops;
 int tainted;
@@ -266,20 +265,6 @@ void oops_enter(void)
 	do_oops_enter_exit();
 }
 
-/*
- * 64-bit random ID for oopses:
- */
-static u64 oops_id;
-
-static int init_oops_id(void)
-{
-	if (!oops_id)
-		get_random_bytes(&oops_id, sizeof(oops_id));
-
-	return 0;
-}
-late_initcall(init_oops_id);
-
 /*
  * Called when the architecture exits its oops handler, after printing
  * everything.
@@ -287,9 +272,6 @@ late_initcall(init_oops_id);
 void oops_exit(void)
 {
 	do_oops_enter_exit();
-	init_oops_id();
-	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
-		(unsigned long long)oops_id);
 }
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c
index 9ba3daa03475..ee9c8b6529e9 100644
--- a/trunk/kernel/sched_rt.c
+++ b/trunk/kernel/sched_rt.c
@@ -208,8 +208,6 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
-	update_curr_rt(rq);
-
 	/*
 	 * RR tasks need a special form of timeslice management.
 	 * FIFO tasks have no timeslices.
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 3655ad359f03..b9f37cb0f2e6 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -172,7 +172,7 @@ static inline void ClearSlabDebug(struct page *page)
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 5
+#define MIN_PARTIAL 2
 
 /*
  * Maximum number of desirable partial slabs.
@@ -1613,7 +1613,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial_tail(get_node(s, page_to_nid(page)), page);
+		add_partial(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
 	slab_unlock(page);