From 821b26eae4f5a7f48da6660d8da22479f022dbf3 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 9 Feb 2010 12:31:47 +1100 Subject: [PATCH] --- yaml --- r: 180385 b: refs/heads/master c: 9eb07c259207d048e3ee8be2a77b2a4680b1edd4 h: refs/heads/master i: 180383: 748641e9b97b4d9416dc887edbe75c606f4f0c02 v: v3 --- [refs] | 2 +- trunk/Documentation/cpu-freq/governors.txt | 4 +- trunk/arch/microblaze/kernel/setup.c | 1 - trunk/arch/powerpc/platforms/pseries/xics.c | 14 +- trunk/arch/sh/kernel/cpu/sh3/entry.S | 3 +- trunk/arch/sh/kernel/dwarf.c | 20 --- trunk/arch/sh/kernel/entry-common.S | 8 +- .../arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 3 +- trunk/drivers/cpufreq/cpufreq_ondemand.c | 3 - trunk/drivers/md/raid5.c | 11 +- trunk/drivers/media/dvb/dvb-core/dmxdev.c | 2 +- trunk/drivers/media/dvb/dvb-core/dvb_demux.c | 20 +-- trunk/drivers/usb/host/r8a66597-hcd.c | 41 ++--- trunk/fs/9p/v9fs.c | 33 ++-- trunk/fs/9p/v9fs_vfs.h | 1 - trunk/fs/9p/vfs_file.c | 19 --- trunk/fs/9p/vfs_inode.c | 2 +- trunk/fs/nfsd/export.c | 10 +- trunk/fs/ocfs2/aops.c | 4 +- trunk/fs/ocfs2/buffer_head_io.c | 2 +- trunk/fs/ocfs2/cluster/heartbeat.c | 6 +- trunk/fs/ocfs2/cluster/tcp.c | 10 +- trunk/fs/ocfs2/cluster/tcp_internal.h | 4 +- trunk/fs/ocfs2/dlm/dlmapi.h | 2 +- trunk/fs/ocfs2/dlm/dlmast.c | 2 +- trunk/fs/ocfs2/dlm/dlmconvert.c | 2 +- trunk/fs/ocfs2/dlm/dlmdebug.c | 2 +- trunk/fs/ocfs2/dlm/dlmdomain.c | 2 +- trunk/fs/ocfs2/dlm/dlmlock.c | 2 +- trunk/fs/ocfs2/dlm/dlmmaster.c | 38 ++--- trunk/fs/ocfs2/dlm/dlmrecovery.c | 147 +++++------------- trunk/fs/ocfs2/dlm/dlmunlock.c | 8 +- trunk/fs/ocfs2/dlmglue.c | 85 ++-------- trunk/fs/ocfs2/export.c | 2 +- trunk/fs/ocfs2/extent_map.c | 2 +- trunk/fs/ocfs2/file.c | 18 +-- trunk/fs/ocfs2/inode.c | 4 +- trunk/fs/ocfs2/ioctl.c | 14 +- trunk/fs/ocfs2/journal.c | 2 +- trunk/fs/ocfs2/ocfs2.h | 4 - trunk/fs/ocfs2/ocfs2_fs.h | 11 +- trunk/fs/ocfs2/refcounttree.c | 12 +- trunk/fs/ocfs2/stack_o2cb.c | 12 +- trunk/fs/ocfs2/super.c | 2 +- trunk/fs/ocfs2/symlink.c | 10 +- trunk/fs/ocfs2/uptodate.c | 4 +- trunk/net/9p/client.c | 53 +++---- trunk/net/9p/trans_fd.c | 10 +- trunk/net/9p/trans_rdma.c | 9 +- trunk/net/9p/trans_virtio.c | 4 +- 50 files changed, 217 insertions(+), 469 deletions(-) diff --git a/[refs] b/[refs] index 395c2de3c2b2..b4bb084db6e1 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 3af9cf11b6efb82aa7a1a24e2382b75d43631c4e +refs/heads/master: 9eb07c259207d048e3ee8be2a77b2a4680b1edd4 diff --git a/trunk/Documentation/cpu-freq/governors.txt b/trunk/Documentation/cpu-freq/governors.txt index 737988fca64d..aed082f49d09 100644 --- a/trunk/Documentation/cpu-freq/governors.txt +++ b/trunk/Documentation/cpu-freq/governors.txt @@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT. up_threshold: defines what the average CPU usage between the samplings of 'sampling_rate' needs to be for the kernel to make a decision on whether it should increase the frequency. For example when it is set -to its default value of '95' it means that between the checking -intervals the CPU needs to be on average more than 95% in use to then +to its default value of '80' it means that between the checking +intervals the CPU needs to be on average more than 80% in use to then decide that the CPU frequency needs to be increased. ignore_nice_load: this parameter takes a value of '0' or '1'. 
When diff --git a/trunk/arch/microblaze/kernel/setup.c b/trunk/arch/microblaze/kernel/setup.c index bb8c4b9ccb80..5372b24ad049 100644 --- a/trunk/arch/microblaze/kernel/setup.c +++ b/trunk/arch/microblaze/kernel/setup.c @@ -54,7 +54,6 @@ void __init setup_arch(char **cmdline_p) microblaze_cache_init(); - invalidate_dcache(); enable_dcache(); invalidate_icache(); diff --git a/trunk/arch/powerpc/platforms/pseries/xics.c b/trunk/arch/powerpc/platforms/pseries/xics.c index f5f79196721c..1ee66db003be 100644 --- a/trunk/arch/powerpc/platforms/pseries/xics.c +++ b/trunk/arch/powerpc/platforms/pseries/xics.c @@ -784,13 +784,9 @@ static void xics_set_cpu_priority(unsigned char cppr) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - /* - * we only really want to set the priority when there's - * just one cppr value on the stack - */ - WARN_ON(os_cppr->index != 0); + BUG_ON(os_cppr->index != 0); - os_cppr->stack[0] = cppr; + os_cppr->stack[os_cppr->index] = cppr; if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_cppr_info(cppr); @@ -825,14 +821,8 @@ void xics_setup_cpu(void) void xics_teardown_cpu(void) { - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); int cpu = smp_processor_id(); - /* - * we have to reset the cppr index to 0 because we're - * not going to return from the IPI - */ - os_cppr->index = 0; xics_set_cpu_priority(0); /* Clear any pending IPI request */ diff --git a/trunk/arch/sh/kernel/cpu/sh3/entry.S b/trunk/arch/sh/kernel/cpu/sh3/entry.S index f6a389c996cb..3f7e2a22c7c2 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/entry.S +++ b/trunk/arch/sh/kernel/cpu/sh3/entry.S @@ -132,6 +132,7 @@ ENTRY(tlb_protection_violation_store) mov #1, r5 call_handle_tlbmiss: + setup_frame_reg mov.l 1f, r0 mov r5, r8 mov.l @r0, r6 @@ -364,8 +365,6 @@ handle_exception: mov.l @k2, k2 ! read out vector and keep in k2 handle_exception_special: - setup_frame_reg - ! Setup return address and jump to exception handler mov.l 7f, r9 ! fetch return address stc r2_bank, r0 ! k2 (vector) diff --git a/trunk/arch/sh/kernel/dwarf.c b/trunk/arch/sh/kernel/dwarf.c index e51168064e56..88d28ec3780a 100644 --- a/trunk/arch/sh/kernel/dwarf.c +++ b/trunk/arch/sh/kernel/dwarf.c @@ -540,8 +540,6 @@ void dwarf_free_frame(struct dwarf_frame *frame) mempool_free(frame, dwarf_frame_pool); } -extern void ret_from_irq(void); - /** * dwarf_unwind_stack - unwind the stack * @@ -680,24 +678,6 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, addr = frame->cfa + reg->addr; frame->return_addr = __raw_readl(addr); - /* - * Ah, the joys of unwinding through interrupts. - * - * Interrupts are tricky - the DWARF info needs to be _really_ - * accurate and unfortunately I'm seeing a lot of bogus DWARF - * info. For example, I've seen interrupts occur in epilogues - * just after the frame pointer (r14) had been restored. The - * problem was that the DWARF info claimed that the CFA could be - * reached by using the value of the frame pointer before it was - * restored. - * - * So until the compiler can be trusted to produce reliable - * DWARF info when it really matters, let's stop unwinding once - * we've calculated the function that was interrupted. 
- */
-	if (prev && prev->pc == (unsigned long)ret_from_irq)
-		frame->return_addr = 0;
-
 	return frame;
 
 bail:
diff --git a/trunk/arch/sh/kernel/entry-common.S b/trunk/arch/sh/kernel/entry-common.S
index 2b15ae60c3a0..f0abd58c3a69 100644
--- a/trunk/arch/sh/kernel/entry-common.S
+++ b/trunk/arch/sh/kernel/entry-common.S
@@ -70,14 +70,8 @@ ret_from_exception:
 	CFI_STARTPROC simple
 	CFI_DEF_CFA r14, 0
 	CFI_REL_OFFSET 17, 64
-	CFI_REL_OFFSET 15, 60
+	CFI_REL_OFFSET 15, 0
 	CFI_REL_OFFSET 14, 56
-	CFI_REL_OFFSET 13, 52
-	CFI_REL_OFFSET 12, 48
-	CFI_REL_OFFSET 11, 44
-	CFI_REL_OFFSET 10, 40
-	CFI_REL_OFFSET 9, 36
-	CFI_REL_OFFSET 8, 32
 	preempt_stop()
 ENTRY(ret_from_irq)
 	!
diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 6e44519960c8..f125e5c551c0 100644
--- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,7 +1356,6 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 
 	kfree(data->powernow_table);
 	kfree(data);
-	per_cpu(powernow_data, pol->cpu) = NULL;
 
 	return 0;
 }
@@ -1376,7 +1375,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	int err;
 
 	if (!data)
-		return 0;
+		return -EINVAL;
 
 	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
 	if (err)
diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c
index bd444dc93cf2..4b34ade2332b 100644
--- a/trunk/drivers/cpufreq/cpufreq_ondemand.c
+++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,9 +554,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 				(dbs_tuners_ins.up_threshold -
 				 dbs_tuners_ins.down_differential);
 
-		if (freq_next < policy->min)
-			freq_next = policy->min;
-
 		if (!dbs_tuners_ins.powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c
index e84204eb12df..b5629c3e14fa 100644
--- a/trunk/drivers/md/raid5.c
+++ b/trunk/drivers/md/raid5.c
@@ -5464,11 +5464,11 @@ static int raid5_start_reshape(mddev_t *mddev)
 		    !test_bit(Faulty, &rdev->flags)) {
 			if (raid5_add_disk(mddev, rdev) == 0) {
 				char nm[20];
-				if (rdev->raid_disk >= conf->previous_raid_disks)
+				if (rdev->raid_disk >= conf->previous_raid_disks) {
 					set_bit(In_sync, &rdev->flags);
-				else
+					added_devices++;
+				} else
 					rdev->recovery_offset = 0;
-				added_devices++;
 				sprintf(nm, "rd%d", rdev->raid_disk);
 				if (sysfs_create_link(&mddev->kobj,
 						      &rdev->kobj, nm))
@@ -5480,9 +5480,12 @@ static int raid5_start_reshape(mddev_t *mddev)
 			break;
 	}
 
+	/* When a reshape changes the number of devices, ->degraded
+	 * is measured against the larger of the pre and post number of
+	 * devices.*/
 	if (mddev->delta_disks > 0) {
 		spin_lock_irqsave(&conf->device_lock, flags);
-		mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
+		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
 			- added_devices;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
diff --git a/trunk/drivers/media/dvb/dvb-core/dmxdev.c b/trunk/drivers/media/dvb/dvb-core/dmxdev.c
index 9ddc57909d49..c37790ad92d0 100644
--- a/trunk/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/trunk/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,6 +761,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
 	dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
 	dmxdevfilter->type = DMXDEV_TYPE_NONE;
 	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
+	INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
 	init_timer(&dmxdevfilter->timer);
dvbdev->users++; @@ -886,7 +887,6 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, dmxdevfilter->type = DMXDEV_TYPE_PES; memcpy(&dmxdevfilter->params, params, sizeof(struct dmx_pes_filter_params)); - INIT_LIST_HEAD(&dmxdevfilter->feed.ts); dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_demux.c b/trunk/drivers/media/dvb/dvb-core/dvb_demux.c index 67f189b7aa1f..b78cfb7d1897 100644 --- a/trunk/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/trunk/drivers/media/dvb/dvb-core/dvb_demux.c @@ -426,7 +426,16 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; }; - if (demux->cnt_storage) { + if (dvb_demux_tscheck) { + if (!demux->cnt_storage) + demux->cnt_storage = vmalloc(MAX_PID + 1); + + if (!demux->cnt_storage) { + printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); + dvb_demux_tscheck = 0; + goto no_dvb_demux_tscheck; + } + /* check pkt counter */ if (pid < MAX_PID) { if (buf[1] & 0x80) @@ -445,6 +454,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; /* end check */ }; +no_dvb_demux_tscheck: list_for_each_entry(feed, &demux->feed_list, list_head) { if ((feed->pid != pid) && (feed->pid != 0x2000)) @@ -1236,7 +1246,6 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); if (!dvbdemux->feed) { vfree(dvbdemux->filter); - dvbdemux->filter = NULL; return -ENOMEM; } for (i = 0; i < dvbdemux->filternum; i++) { @@ -1248,13 +1257,6 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed[i].index = i; } - if (dvb_demux_tscheck) { - dvbdemux->cnt_storage = vmalloc(MAX_PID + 1); - - if (!dvbdemux->cnt_storage) - printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. 
Disabling it\n"); - } - INIT_LIST_HEAD(&dvbdemux->frontend_list); for (i = 0; i < DMX_TS_PES_OTHER; i++) { diff --git a/trunk/drivers/usb/host/r8a66597-hcd.c b/trunk/drivers/usb/host/r8a66597-hcd.c index bee558aed427..0ceec123ddfd 100644 --- a/trunk/drivers/usb/host/r8a66597-hcd.c +++ b/trunk/drivers/usb/host/r8a66597-hcd.c @@ -35,9 +35,7 @@ #include #include #include -#include #include -#include #include "../core/hcd.h" #include "r8a66597.h" @@ -822,26 +820,6 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); } -static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, - int status) -__releases(r8a66597->lock) -__acquires(r8a66597->lock) -{ - if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { - void *ptr; - - for (ptr = urb->transfer_buffer; - ptr < urb->transfer_buffer + urb->transfer_buffer_length; - ptr += PAGE_SIZE) - flush_dcache_page(virt_to_page(ptr)); - } - - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); - spin_lock(&r8a66597->lock); -} - /* this function must be called with interrupt disabled */ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) { @@ -860,9 +838,15 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) list_del(&td->queue); kfree(td); - if (urb) - r8a66597_urb_done(r8a66597, urb, -ENODEV); + if (urb) { + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), + urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, + -ENODEV); + spin_lock(&r8a66597->lock); + } break; } } @@ -1022,8 +1006,6 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, /* this function must be called with interrupt disabled */ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, u16 syssts) -__releases(r8a66597->lock) -__acquires(r8a66597->lock) { if (syssts == SE0) { r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); @@ -1041,9 +1023,7 @@ __acquires(r8a66597->lock) usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); } - spin_unlock(&r8a66597->lock); usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); - spin_lock(&r8a66597->lock); } /* this function must be called with interrupt disabled */ @@ -1303,7 +1283,10 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) if (usb_pipeisoc(urb->pipe)) urb->start_frame = r8a66597_get_frame(hcd); - r8a66597_urb_done(r8a66597, urb, status); + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(hcd, urb, status); + spin_lock(&r8a66597->lock); } if (restart) { diff --git a/trunk/fs/9p/v9fs.c b/trunk/fs/9p/v9fs.c index 7d6c2139891d..cf62b05e296a 100644 --- a/trunk/fs/9p/v9fs.c +++ b/trunk/fs/9p/v9fs.c @@ -84,7 +84,7 @@ static const match_table_t tokens = { static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) { - char *options, *tmp_options; + char *options; substring_t args[MAX_OPT_ARGS]; char *p; int option = 0; @@ -102,12 +102,9 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) if (!opts) return 0; - tmp_options = kstrdup(opts, GFP_KERNEL); - if (!tmp_options) { - ret = -ENOMEM; + options = kstrdup(opts, GFP_KERNEL); + if (!options) goto fail_option_alloc; - } - options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -162,12 +159,8 @@ static int 
v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 			break;
 		case Opt_cache:
 			s = match_strdup(&args[0]);
-			if (!s) {
-				ret = -ENOMEM;
-				P9_DPRINTK(P9_DEBUG_ERROR,
-					   "problem allocating copy of cache arg\n");
-				goto free_and_return;
-			}
+			if (!s)
+				goto fail_option_alloc;
 
 			if (strcmp(s, "loose") == 0)
 				v9ses->cache = CACHE_LOOSE;
@@ -180,12 +173,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 
 		case Opt_access:
 			s = match_strdup(&args[0]);
-			if (!s) {
-				ret = -ENOMEM;
-				P9_DPRINTK(P9_DEBUG_ERROR,
-					   "problem allocating copy of access arg\n");
-				goto free_and_return;
-			}
+			if (!s)
+				goto fail_option_alloc;
 
 			v9ses->flags &= ~V9FS_ACCESS_MASK;
 			if (strcmp(s, "user") == 0)
@@ -205,11 +194,13 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 			continue;
 		}
 	}
+	kfree(options);
+	return ret;
 
-free_and_return:
-	kfree(tmp_options);
 fail_option_alloc:
-	return ret;
+	P9_DPRINTK(P9_DEBUG_ERROR,
+		   "failed to allocate copy of option argument\n");
+	return -ENOMEM;
 }
 
 /**
diff --git a/trunk/fs/9p/v9fs_vfs.h b/trunk/fs/9p/v9fs_vfs.h
index ed835836e0dc..3a7560e35865 100644
--- a/trunk/fs/9p/v9fs_vfs.h
+++ b/trunk/fs/9p/v9fs_vfs.h
@@ -60,4 +60,3 @@ void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
 
 ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
-void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/trunk/fs/9p/vfs_file.c b/trunk/fs/9p/vfs_file.c
index 74a0461a9ac0..3902bf43a088 100644
--- a/trunk/fs/9p/vfs_file.c
+++ b/trunk/fs/9p/vfs_file.c
@@ -257,23 +257,6 @@ v9fs_file_write(struct file *filp, const char __user * data,
 	return total;
 }
 
-static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
-			   int datasync)
-{
-	struct p9_fid *fid;
-	struct p9_wstat wstat;
-	int retval;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
-		   dentry, datasync);
-
-	fid = filp->private_data;
-	v9fs_blank_wstat(&wstat);
-
-	retval = p9_client_wstat(fid, &wstat);
-	return retval;
-}
-
 static const struct file_operations v9fs_cached_file_operations = {
 	.llseek = generic_file_llseek,
 	.read = do_sync_read,
@@ -283,7 +266,6 @@ static const struct file_operations v9fs_cached_file_operations = {
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
 	.mmap = generic_file_readonly_mmap,
-	.fsync = v9fs_file_fsync,
 };
 
 const struct file_operations v9fs_file_operations = {
@@ -294,5 +276,4 @@ const struct file_operations v9fs_file_operations = {
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
 	.mmap = generic_file_readonly_mmap,
-	.fsync = v9fs_file_fsync,
 };
diff --git a/trunk/fs/9p/vfs_inode.c b/trunk/fs/9p/vfs_inode.c
index a407fa3388c0..9d03d1ebca6f 100644
--- a/trunk/fs/9p/vfs_inode.c
+++ b/trunk/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
  *
  */
 
-void
+static void
 v9fs_blank_wstat(struct p9_wstat *wstat)
 {
 	wstat->type = ~0;
diff --git a/trunk/fs/nfsd/export.c b/trunk/fs/nfsd/export.c
index a0c4016413f1..c487810a2366 100644
--- a/trunk/fs/nfsd/export.c
+++ b/trunk/fs/nfsd/export.c
@@ -1316,11 +1316,19 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
 
 static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
 {
+	struct svc_export *exp;
 	u32 fsidv[2];
 
 	mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
 
-	return rqst_exp_find(rqstp, FSID_NUM, fsidv);
+	exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
+	/*
+	 * We shouldn't have accepted an nfsv4 request at all if we
+	 * don't have a pseudoexport!:
+	 */
+	if (IS_ERR(exp) && 
PTR_ERR(exp) == -ENOENT) + exp = ERR_PTR(-ESERVERFAULT); + return exp; } /* diff --git a/trunk/fs/ocfs2/aops.c b/trunk/fs/ocfs2/aops.c index 7e9df11260f4..3dae4a13f6e4 100644 --- a/trunk/fs/ocfs2/aops.c +++ b/trunk/fs/ocfs2/aops.c @@ -599,7 +599,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, return ret; } -/* +/* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. Like the core uses * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from @@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw, ret = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, - nr_segs, + nr_segs, ocfs2_direct_IO_get_blocks, ocfs2_dio_end_io); diff --git a/trunk/fs/ocfs2/buffer_head_io.c b/trunk/fs/ocfs2/buffer_head_io.c index 21c808f752d8..d43d34a1dd31 100644 --- a/trunk/fs/ocfs2/buffer_head_io.c +++ b/trunk/fs/ocfs2/buffer_head_io.c @@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, } ocfs2_metadata_cache_io_unlock(ci); - mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", + mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", (unsigned long long)block, nr, ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", flags); diff --git a/trunk/fs/ocfs2/cluster/heartbeat.c b/trunk/fs/ocfs2/cluster/heartbeat.c index 5c9890006708..eda5b8bcddd5 100644 --- a/trunk/fs/ocfs2/cluster/heartbeat.c +++ b/trunk/fs/ocfs2/cluster/heartbeat.c @@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type); unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; -/* Only sets a new threshold if there are no active regions. +/* Only sets a new threshold if there are no active regions. * * No locking or otherwise interesting code is required for reading * o2hb_dead_threshold as it can't change once regions are active and @@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work) mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " "milliseconds\n", reg->hr_dev_name, - jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); + jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); o2quo_disk_timeout(); } @@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg, "seq %llu last %llu changed %u equal %u\n", slot->ds_node_num, (long long)slot->ds_last_generation, le32_to_cpu(hb_block->hb_cksum), - (unsigned long long)le64_to_cpu(hb_block->hb_seq), + (unsigned long long)le64_to_cpu(hb_block->hb_seq), (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, slot->ds_equal_samples); diff --git a/trunk/fs/ocfs2/cluster/tcp.c b/trunk/fs/ocfs2/cluster/tcp.c index d8d0c65ac03c..334f231a422c 100644 --- a/trunk/fs/ocfs2/cluster/tcp.c +++ b/trunk/fs/ocfs2/cluster/tcp.c @@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, } if (was_valid && !valid) { - printk(KERN_NOTICE "o2net: no longer connected to " + printk(KERN_INFO "o2net: no longer connected to " SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); o2net_complete_nodes_nsw(nn); } @@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, if (!was_valid && valid) { o2quo_conn_up(o2net_num_from_nn(nn)); cancel_delayed_work(&nn->nn_connect_expired); - printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", + printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", o2nm_this_node() > sc->sc_node->nd_num ? 
"connected to" : "accepted connection from", SC_NODEF_ARGS(sc)); @@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc, cond_resched(); continue; } - mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT + mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); o2net_ensure_shutdown(nn, sc, 0); break; @@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data) do_gettimeofday(&now); - printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " + printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); mlog(ML_NOTICE, "here are some times that might help debug the " "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", - sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, + sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, now.tv_sec, (long) now.tv_usec, sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, sc->sc_tv_advance_start.tv_sec, diff --git a/trunk/fs/ocfs2/cluster/tcp_internal.h b/trunk/fs/ocfs2/cluster/tcp_internal.h index 96fa7ebc530c..8d58cfe410b1 100644 --- a/trunk/fs/ocfs2/cluster/tcp_internal.h +++ b/trunk/fs/ocfs2/cluster/tcp_internal.h @@ -32,10 +32,10 @@ * on their number */ #define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) -/* +/* * This version number represents quite a lot, unfortunately. It not * only represents the raw network message protocol on the wire but also - * locking semantics of the file system using the protocol. It should + * locking semantics of the file system using the protocol. It should * be somewhere else, I'm sure, but right now it isn't. * * With version 11, we separate out the filesystem locking portion. The diff --git a/trunk/fs/ocfs2/dlm/dlmapi.h b/trunk/fs/ocfs2/dlm/dlmapi.h index 3cfa114aa391..b5786a787fab 100644 --- a/trunk/fs/ocfs2/dlm/dlmapi.h +++ b/trunk/fs/ocfs2/dlm/dlmapi.h @@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err); mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ } while (0) -#define DLM_LKSB_UNUSED1 0x01 +#define DLM_LKSB_UNUSED1 0x01 #define DLM_LKSB_PUT_LVB 0x02 #define DLM_LKSB_GET_LVB 0x04 #define DLM_LKSB_UNUSED2 0x08 diff --git a/trunk/fs/ocfs2/dlm/dlmast.c b/trunk/fs/ocfs2/dlm/dlmast.c index dccc439fa087..01cf8cc3d286 100644 --- a/trunk/fs/ocfs2/dlm/dlmast.c +++ b/trunk/fs/ocfs2/dlm/dlmast.c @@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_lock_put(lock); /* free up the reserved bast that we are cancelling. * guaranteed that this will not be the last reserved - * ast because *both* an ast and a bast were reserved + * ast because *both* an ast and a bast were reserved * to get to this point. the res->spinlock will not be * taken here */ dlm_lockres_release_ast(dlm, res); diff --git a/trunk/fs/ocfs2/dlm/dlmconvert.c b/trunk/fs/ocfs2/dlm/dlmconvert.c index f283bce776b4..ca96bce50e18 100644 --- a/trunk/fs/ocfs2/dlm/dlmconvert.c +++ b/trunk/fs/ocfs2/dlm/dlmconvert.c @@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. 
*/ - dlm_wait_for_node_death(dlm, res->owner, + dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " diff --git a/trunk/fs/ocfs2/dlm/dlmdebug.c b/trunk/fs/ocfs2/dlm/dlmdebug.c index 0cd24cf54396..42b0bad7a612 100644 --- a/trunk/fs/ocfs2/dlm/dlmdebug.c +++ b/trunk/fs/ocfs2/dlm/dlmdebug.c @@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) assert_spin_locked(&res->spinlock); stringify_lockname(res->lockname.name, res->lockname.len, - buf, sizeof(buf)); + buf, sizeof(buf) - 1); printk("lockres: %s, owner=%u, state=%u\n", buf, res->owner, res->state); printk(" last used: %lu, refcnt: %u, on purge list: %s\n", diff --git a/trunk/fs/ocfs2/dlm/dlmdomain.c b/trunk/fs/ocfs2/dlm/dlmdomain.c index 988c9055fd4e..0334000676d3 100644 --- a/trunk/fs/ocfs2/dlm/dlmdomain.c +++ b/trunk/fs/ocfs2/dlm/dlmdomain.c @@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, } /* Once the dlm ctxt is marked as leaving then we don't want - * to be put in someone's domain map. + * to be put in someone's domain map. * Also, explicitly disallow joining at certain troublesome * times (ie. during recovery). */ if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { diff --git a/trunk/fs/ocfs2/dlm/dlmlock.c b/trunk/fs/ocfs2/dlm/dlmlock.c index 733337772671..437698e9465f 100644 --- a/trunk/fs/ocfs2/dlm/dlmlock.c +++ b/trunk/fs/ocfs2/dlm/dlmlock.c @@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, } dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); - } else if (dlm_is_recovery_lock(res->lockname.name, + } else if (dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { /* special case for the $RECOVERY lock. * there will never be an AST delivered to put diff --git a/trunk/fs/ocfs2/dlm/dlmmaster.c b/trunk/fs/ocfs2/dlm/dlmmaster.c index a659606dcb95..03ccf9a7b1f4 100644 --- a/trunk/fs/ocfs2/dlm/dlmmaster.c +++ b/trunk/fs/ocfs2/dlm/dlmmaster.c @@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) struct dlm_master_list_entry *mle; assert_spin_locked(&dlm->spinlock); - + list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { if (node_up) dlm_mle_node_up(dlm, mle, NULL, idx); @@ -833,7 +833,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, __dlm_insert_mle(dlm, mle); /* still holding the dlm spinlock, check the recovery map - * to see if there are any nodes that still need to be + * to see if there are any nodes that still need to be * considered. these will not appear in the mle nodemap * but they might own this lockres. wait on them. 
*/ bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); @@ -883,7 +883,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, msleep(500); } continue; - } + } dlm_kick_recovery_thread(dlm); msleep(1000); @@ -939,8 +939,8 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, res->lockname.name, blocked); if (++tries > 20) { mlog(ML_ERROR, "%s:%.*s: spinning on " - "dlm_wait_for_lock_mastery, blocked=%d\n", - dlm->name, res->lockname.len, + "dlm_wait_for_lock_mastery, blocked=%d\n", + dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); dlm_print_one_mle(mle); @@ -1029,7 +1029,7 @@ static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); b = (mle->type == DLM_MLE_BLOCK); if ((*blocked && !b) || (!*blocked && b)) { - mlog(0, "%s:%.*s: status change: old=%d new=%d\n", + mlog(0, "%s:%.*s: status change: old=%d new=%d\n", dlm->name, res->lockname.len, res->lockname.name, *blocked, b); *blocked = b; @@ -1602,7 +1602,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, } mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", dlm->node_num, res->lockname.len, res->lockname.name); - ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, + ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, DLM_ASSERT_MASTER_MLE_CLEANUP); if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); @@ -1701,7 +1701,7 @@ static int dlm_do_assert_master(struct dlm_ctxt *dlm, if (r & DLM_ASSERT_RESPONSE_REASSERT) { mlog(0, "%.*s: node %u create mles on other " - "nodes and requests a re-assert\n", + "nodes and requests a re-assert\n", namelen, lockname, to); reassert = 1; } @@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); goto done; - } + } } } spin_unlock(&dlm->master_lock); @@ -1883,7 +1883,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, int extra_ref = 0; int nn = -1; int rr, err = 0; - + spin_lock(&mle->spinlock); if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) extra_ref = 1; @@ -1891,7 +1891,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, /* MASTER mle: if any bits set in the response map * then the calling node needs to re-assert to clear * up nodes that this node contacted */ - while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, + while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, nn+1)) < O2NM_MAX_NODES) { if (nn != dlm->node_num && nn != assert->node_idx) master_request = 1; @@ -2002,7 +2002,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); - *ret_data = (void *)res; + *ret_data = (void *)res; dlm_put(dlm); return -EINVAL; } @@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, item->u.am.request_from = request_from; item->u.am.flags = flags; - if (ignore_higher) - mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, + if (ignore_higher) + mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, res->lockname.name); - + spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); @@ -2133,7 +2133,7 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) * think that $RECOVERY is currently 
mastered by a dead node. If so,
  * we wait a short time to allow that node to get notified by its own
  * heartbeat stack, then check again. All $RECOVERY lock resources
- * mastered by dead nodes are purged when the heartbeat callback is
+ * mastered by dead nodes are purged when the heartbeat callback is 
  * fired, so we can know for sure that it is safe to continue once
  * the node returns a live node or no node. */
 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
 			ret = -EAGAIN;
 		}
 		spin_unlock(&dlm->spinlock);
-		mlog(0, "%s: reco lock master is %u\n", dlm->name,
+		mlog(0, "%s: reco lock master is %u\n", dlm->name, 
 			master);
 		break;
 	}
@@ -2602,7 +2602,7 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 		mlog(0, "%s:%.*s: timed out during migration\n",
 		     dlm->name, res->lockname.len, res->lockname.name);
-		/* avoid hang during shutdown when migrating lockres
+		/* avoid hang during shutdown when migrating lockres 
 		 * to a node which also goes down */
 		if (dlm_is_node_dead(dlm, target)) {
 			mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
 	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
 	spin_unlock(&res->spinlock);
 
-	/* target has died, so make the caller break out of the
+	/* target has died, so make the caller break out of the 
 	 * wait_event, but caller must recheck the domain_map */
 	spin_lock(&dlm->spinlock);
 	if (!test_bit(mig_target, dlm->domain_map))
diff --git a/trunk/fs/ocfs2/dlm/dlmrecovery.c b/trunk/fs/ocfs2/dlm/dlmrecovery.c
index 344bcf90cbf4..2f9e4e19a4f2 100644
--- a/trunk/fs/ocfs2/dlm/dlmrecovery.c
+++ b/trunk/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
 			if (lock->ml.node == dead_node) {
 				mlog(0, "AHA! 
there was " "a $RECOVERY lock for dead " - "node %u (%s)!\n", + "node %u (%s)!\n", dead_node, dlm->name); list_del_init(&lock->list); dlm_lock_put(lock); @@ -1164,39 +1164,6 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, mres->master = master; } -static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, - struct dlm_migratable_lockres *mres, - int queue) -{ - if (!lock->lksb) - return; - - /* Ignore lvb in all locks in the blocked list */ - if (queue == DLM_BLOCKED_LIST) - return; - - /* Only consider lvbs in locks with granted EX or PR lock levels */ - if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) - return; - - if (dlm_lvb_is_empty(mres->lvb)) { - memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); - return; - } - - /* Ensure the lvb copied for migration matches in other valid locks */ - if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) - return; - - mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " - "node=%u\n", - dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), - dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), - lock->lockres->lockname.len, lock->lockres->lockname.name, - lock->ml.node); - dlm_print_one_lock_resource(lock->lockres); - BUG(); -} /* returns 1 if this lock fills the network structure, * 0 otherwise */ @@ -1214,7 +1181,20 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock, ml->list = queue; if (lock->lksb) { ml->flags = lock->lksb->flags; - dlm_prepare_lvb_for_migration(lock, mres, queue); + /* send our current lvb */ + if (ml->type == LKM_EXMODE || + ml->type == LKM_PRMODE) { + /* if it is already set, this had better be a PR + * and it has to match */ + if (!dlm_lvb_is_empty(mres->lvb) && + (ml->type == LKM_EXMODE || + memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { + mlog(ML_ERROR, "mismatched lvbs!\n"); + dlm_print_one_lock_resource(lock->lockres); + BUG(); + } + memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); + } } ml->node = lock->ml.node; mres->num_locks++; @@ -1750,7 +1730,6 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, struct dlm_lock *lock = NULL; u8 from = O2NM_MAX_NODES; unsigned int added = 0; - __be64 c; mlog(0, "running %d locks for this lockres\n", mres->num_locks); for (i=0; inum_locks; i++) { @@ -1798,48 +1777,19 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, /* lock is always created locally first, and * destroyed locally last. 
it must be on the list */ if (!lock) { - c = ml->cookie; - mlog(ML_ERROR, "Could not find local lock " - "with cookie %u:%llu, node %u, " - "list %u, flags 0x%x, type %d, " - "conv %d, highest blocked %d\n", - dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - ml->node, ml->list, ml->flags, ml->type, - ml->convert_type, ml->highest_blocked); - __dlm_print_one_lock_resource(res); - BUG(); - } - - if (lock->ml.node != ml->node) { - c = lock->ml.cookie; - mlog(ML_ERROR, "Mismatched node# in lock " - "cookie %u:%llu, name %.*s, node %u\n", + __be64 c = ml->cookie; + mlog(ML_ERROR, "could not find local lock " + "with cookie %u:%llu!\n", dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - res->lockname.len, res->lockname.name, - lock->ml.node); - c = ml->cookie; - mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " - "node %u, list %u, flags 0x%x, type %d, " - "conv %d, highest blocked %d\n", - dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - ml->node, ml->list, ml->flags, ml->type, - ml->convert_type, ml->highest_blocked); + dlm_get_lock_cookie_seq(be64_to_cpu(c))); __dlm_print_one_lock_resource(res); BUG(); } + BUG_ON(lock->ml.node != ml->node); if (tmpq != queue) { - c = ml->cookie; - mlog(0, "Lock cookie %u:%llu was on list %u " - "instead of list %u for %.*s\n", - dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - j, ml->list, res->lockname.len, - res->lockname.name); - __dlm_print_one_lock_resource(res); + mlog(0, "lock was on %u instead of %u for %.*s\n", + j, ml->list, res->lockname.len, res->lockname.name); spin_unlock(&res->spinlock); continue; } @@ -1889,7 +1839,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, * the lvb. 
*/ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); } else { - /* otherwise, the node is sending its + /* otherwise, the node is sending its * most recent valid lvb info */ BUG_ON(ml->type != LKM_EXMODE && ml->type != LKM_PRMODE); @@ -1936,7 +1886,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); list_for_each_entry(lock, queue, list) { if (lock->ml.cookie == ml->cookie) { - c = lock->ml.cookie; + __be64 c = lock->ml.cookie; mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " "exists on this lockres!\n", dlm->name, res->lockname.len, res->lockname.name, @@ -2164,7 +2114,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, assert_spin_locked(&res->spinlock); if (res->owner == dlm->node_num) - /* if this node owned the lockres, and if the dead node + /* if this node owned the lockres, and if the dead node * had an EX when he died, blank out the lvb */ search_node = dead_node; else { @@ -2202,7 +2152,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, /* this node is the lockres master: * 1) remove any stale locks for the dead node - * 2) if the dead node had an EX when he died, blank out the lvb + * 2) if the dead node had an EX when he died, blank out the lvb */ assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); @@ -2243,12 +2193,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: freed %u locks for dead node %u, " "dropping ref from lockres\n", dlm->name, res->lockname.len, res->lockname.name, freed, dead_node); - if(!test_bit(dead_node, res->refmap)) { - mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " - "but ref was not set\n", dlm->name, - res->lockname.len, res->lockname.name, freed, dead_node); - __dlm_print_one_lock_resource(res); - } + BUG_ON(!test_bit(dead_node, res->refmap)); dlm_lockres_clear_refmap_bit(dead_node, res); } else if (test_bit(dead_node, res->refmap)) { mlog(0, "%s:%.*s: dead node %u had a ref, but had " @@ -2315,7 +2260,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) } spin_unlock(&res->spinlock); continue; - } + } spin_lock(&res->spinlock); /* zero the lvb if necessary */ dlm_revalidate_lvb(dlm, res, dead_node); @@ -2466,7 +2411,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) * this function on each node racing to become the recovery * master will not stop attempting this until either: * a) this node gets the EX (and becomes the recovery master), - * or b) dlm->reco.new_master gets set to some nodenum + * or b) dlm->reco.new_master gets set to some nodenum * != O2NM_INVALID_NODE_NUM (another node will do the reco). * so each time a recovery master is needed, the entire cluster * will sync at this point. if the new master dies, that will @@ -2479,7 +2424,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); -again: +again: memset(&lksb, 0, sizeof(lksb)); ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, @@ -2492,8 +2437,8 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) if (ret == DLM_NORMAL) { mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", dlm->name, dlm->node_num); - - /* got the EX lock. check to see if another node + + /* got the EX lock. 
check to see if another node * just became the reco master */ if (dlm_reco_master_ready(dlm)) { mlog(0, "%s: got reco EX lock, but %u will " @@ -2506,12 +2451,12 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) /* see if recovery was already finished elsewhere */ spin_lock(&dlm->spinlock); if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { - status = -EINVAL; + status = -EINVAL; mlog(0, "%s: got reco EX lock, but " "node got recovered already\n", dlm->name); if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { mlog(ML_ERROR, "%s: new master is %u " - "but no dead node!\n", + "but no dead node!\n", dlm->name, dlm->reco.new_master); BUG(); } @@ -2523,7 +2468,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) * set the master and send the messages to begin recovery */ if (!status) { mlog(0, "%s: dead=%u, this=%u, sending " - "begin_reco now\n", dlm->name, + "begin_reco now\n", dlm->name, dlm->reco.dead_node, dlm->node_num); status = dlm_send_begin_reco_message(dlm, dlm->reco.dead_node); @@ -2556,7 +2501,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", dlm->name, dlm->node_num); /* another node is master. wait on - * reco.new_master != O2NM_INVALID_NODE_NUM + * reco.new_master != O2NM_INVALID_NODE_NUM * for at most one second */ wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_reco_master_ready(dlm), @@ -2644,13 +2589,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) "begin reco msg (%d)\n", dlm->name, nodenum, ret); ret = 0; } - - /* - * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, - * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. - * We are handling both for compatibility reasons. - */ - if (ret == -EAGAIN || ret == EAGAIN) { + if (ret == -EAGAIN) { mlog(0, "%s: trying to start recovery of node " "%u, but node %u is waiting for last recovery " "to complete, backoff for a bit\n", dlm->name, @@ -2660,7 +2599,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) } if (ret < 0) { struct dlm_lock_resource *res; - /* this is now a serious problem, possibly ENOMEM + /* this is now a serious problem, possibly ENOMEM * in the network stack. 
must retry */ mlog_errno(ret); mlog(ML_ERROR, "begin reco of dlm %s to node %u " @@ -2673,7 +2612,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) } else { mlog(ML_ERROR, "recovery lock not found\n"); } - /* sleep for a bit in hopes that we can avoid + /* sleep for a bit in hopes that we can avoid * another ENOMEM */ msleep(100); goto retry; @@ -2725,7 +2664,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, } if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { mlog(ML_NOTICE, "%s: dead_node previously set to %u, " - "node %u changing it to %u\n", dlm->name, + "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, br->dead_node); } dlm_set_reco_master(dlm, br->node_idx); @@ -2791,8 +2730,8 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) if (ret < 0) { mlog_errno(ret); if (dlm_is_host_down(ret)) { - /* this has no effect on this recovery - * session, so set the status to zero to + /* this has no effect on this recovery + * session, so set the status to zero to * finish out the last recovery */ mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); @@ -2829,7 +2768,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, mlog(0, "%s: node %u finalizing recovery stage%d of " "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); - + spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { diff --git a/trunk/fs/ocfs2/dlm/dlmunlock.c b/trunk/fs/ocfs2/dlm/dlmunlock.c index 49e29ecd0201..00f53b2aea76 100644 --- a/trunk/fs/ocfs2/dlm/dlmunlock.c +++ b/trunk/fs/ocfs2/dlm/dlmunlock.c @@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); - } else if (status == DLM_RECOVERING || - status == DLM_MIGRATING || + } else if (status == DLM_RECOVERING || + status == DLM_MIGRATING || status == DLM_FORWARD) { /* must clear the actions because this unlock * is about to be retried. cannot free or do @@ -661,14 +661,14 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb, if (call_ast) { mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { - /* it is possible that there is one last bast + /* it is possible that there is one last bast * pending. make sure it is flushed, then * call the unlockast. * not an issue if this is a mastered remotely, * since this lock has been removed from the * lockres queues and cannot be found. */ dlm_kick_thread(dlm, NULL); - wait_event(dlm->ast_wq, + wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } (*unlockast)(data, status); diff --git a/trunk/fs/ocfs2/dlmglue.c b/trunk/fs/ocfs2/dlmglue.c index e044019cb3b1..c5e4a49e3a12 100644 --- a/trunk/fs/ocfs2/dlmglue.c +++ b/trunk/fs/ocfs2/dlmglue.c @@ -875,14 +875,6 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; - - /* - * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing - * the OCFS2_LOCK_BUSY flag to prevent the dc thread from - * downconverting the lock before the upconvert has fully completed. 
- */ - lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); - lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); mlog_exit_void(); @@ -915,6 +907,8 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, assert_spin_locked(&lockres->l_lock); + lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); + if (level > lockres->l_blocking) { /* only schedule a downconvert if we haven't already scheduled * one that goes low enough to satisfy the level we're @@ -927,9 +921,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, lockres->l_blocking = level; } - if (needs_downconvert) - lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); - mlog_exit(needs_downconvert); return needs_downconvert; } @@ -1142,7 +1133,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, mlog_entry_void(); spin_lock_irqsave(&lockres->l_lock, flags); lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); - lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); if (convert) lockres->l_action = OCFS2_AST_INVALID; else @@ -1333,13 +1323,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, again: wait = 0; - spin_lock_irqsave(&lockres->l_lock, flags); - if (catch_signals && signal_pending(current)) { ret = -ERESTARTSYS; - goto unlock; + goto out; } + spin_lock_irqsave(&lockres->l_lock, flags); + mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, "Cluster lock called on freeing lockres %s! flags " "0x%lx\n", lockres->l_name, lockres->l_flags); @@ -1356,25 +1346,6 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, goto unlock; } - if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) { - /* - * We've upconverted. If the lock now has a level we can - * work with, we take it. If, however, the lock is not at the - * required level, we go thru the full cycle. One way this could - * happen is if a process requesting an upconvert to PR is - * closely followed by another requesting upconvert to an EX. - * If the process requesting EX lands here, we want it to - * continue attempting to upconvert and let the process - * requesting PR take the lock. - * If multiple processes request upconvert to PR, the first one - * here will take the lock. The others will have to go thru the - * OCFS2_LOCK_BLOCKED check to ensure that there is no pending - * downconvert request. - */ - if (level <= lockres->l_level) - goto update_holders; - } - if (lockres->l_flags & OCFS2_LOCK_BLOCKED && !ocfs2_may_continue_on_blocked_lock(lockres, level)) { /* is the lock is currently blocked on behalf of @@ -1445,14 +1416,11 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, goto again; } -update_holders: /* Ok, if we get here then we're good to go. */ ocfs2_inc_holders(lockres, level); ret = 0; unlock: - lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); - spin_unlock_irqrestore(&lockres->l_lock, flags); out: /* @@ -3187,7 +3155,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb, /* Mark the lockres as being dropped. It will no longer be * queued if blocking, but we still may have to wait on it * being dequeued from the downconvert thread before we can consider - * it safe to drop. + * it safe to drop. * * You can *not* attempt to call cluster_lock on this lockres anymore. 
*/ void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) @@ -3384,7 +3352,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, unsigned long flags; int blocking; int new_level; - int level; int ret = 0; int set_lvb = 0; unsigned int gen; @@ -3393,17 +3360,9 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, spin_lock_irqsave(&lockres->l_lock, flags); -recheck: - /* - * Is it still blocking? If not, we have no more work to do. - */ - if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) { - BUG_ON(lockres->l_blocking != DLM_LOCK_NL); - spin_unlock_irqrestore(&lockres->l_lock, flags); - ret = 0; - goto leave; - } + BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); +recheck: if (lockres->l_flags & OCFS2_LOCK_BUSY) { /* XXX * This is a *big* race. The OCFS2_LOCK_PENDING flag @@ -3442,31 +3401,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, goto leave; } - /* - * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is - * set when the ast is received for an upconvert just before the - * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast - * on the heels of the ast, we want to delay the downconvert just - * enough to allow the up requestor to do its task. Because this - * lock is in the blocked queue, the lock will be downconverted - * as soon as the requestor is done with the lock. - */ - if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) - goto leave_requeue; - - /* - * How can we block and yet be at NL? We were trying to upconvert - * from NL and got canceled. The code comes back here, and now - * we notice and clear BLOCKING. - */ - if (lockres->l_level == DLM_LOCK_NL) { - BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders); - lockres->l_blocking = DLM_LOCK_NL; - lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); - spin_unlock_irqrestore(&lockres->l_lock, flags); - goto leave; - } - /* if we're blocking an exclusive and we have *any* holders, * then requeue. */ if ((lockres->l_blocking == DLM_LOCK_EX) @@ -3504,7 +3438,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, * may sleep, so we save off a copy of what we're blocking as * it may change while we're not holding the spin lock. */ blocking = lockres->l_blocking; - level = lockres->l_level; spin_unlock_irqrestore(&lockres->l_lock, flags); ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); @@ -3513,7 +3446,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, goto leave; spin_lock_irqsave(&lockres->l_lock, flags); - if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) { + if (blocking != lockres->l_blocking) { /* If this changed underneath us, then we can't drop * it just yet. 
*/ goto recheck; diff --git a/trunk/fs/ocfs2/export.c b/trunk/fs/ocfs2/export.c index 19ad145d2af3..15713cbb865c 100644 --- a/trunk/fs/ocfs2/export.c +++ b/trunk/fs/ocfs2/export.c @@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", (unsigned long long)blkno, generation); } - + *max_len = len; bail: diff --git a/trunk/fs/ocfs2/extent_map.c b/trunk/fs/ocfs2/extent_map.c index 5328529e7fd2..d35a27f4523e 100644 --- a/trunk/fs/ocfs2/extent_map.c +++ b/trunk/fs/ocfs2/extent_map.c @@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, emi->ei_clusters += ins->ei_clusters; return 1; } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && - (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos && + (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && ins->ei_flags == emi->ei_flags) { emi->ei_phys = ins->ei_phys; emi->ei_cpos = ins->ei_cpos; diff --git a/trunk/fs/ocfs2/file.c b/trunk/fs/ocfs2/file.c index 558ce0312421..06ccf6a86d35 100644 --- a/trunk/fs/ocfs2/file.c +++ b/trunk/fs/ocfs2/file.c @@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode, int ret; offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ - /* ugh. in prepare/commit_write, if from==to==start of block, we + /* ugh. in prepare/commit_write, if from==to==start of block, we ** skip the prepare. make sure we never send an offset for the start ** of a block */ @@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, struct inode *inode = dentry->d_inode; loff_t saved_pos, end; - /* + /* * We start with a read level meta lock and only jump to an ex * if we need to make modifications here. */ @@ -2013,8 +2013,8 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); - if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || - ((file->f_flags & O_DIRECT) && has_refcount)) { + if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) || + (file->f_flags & O_DIRECT && has_refcount)) { ret = filemap_fdatawrite_range(file->f_mapping, pos, pos + count - 1); if (ret < 0) @@ -2033,7 +2033,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, pos + count - 1); } - /* + /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that * it can unlock our rw lock. (it's the clustered equivalent of @@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, goto bail; } - /* + /* * buffered reads protect themselves in ->readpage(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. */ @@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, * We're fine letting folks race truncates and extending * writes with read across the cluster, just like they can * locally. Hence no rw_lock during read. - * + * * Take and drop the meta data lock to update inode fields * like i_size. This allows the checks down below - * generic_file_aio_read() a chance of actually working. + * generic_file_aio_read() a chance of actually working. 
*/ ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); if (ret < 0) { @@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, bail: if (have_alloc_sem) up_read(&inode->i_alloc_sem); - if (rw_level != -1) + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); mlog_exit(ret); diff --git a/trunk/fs/ocfs2/inode.c b/trunk/fs/ocfs2/inode.c index 88459bdd1ff3..0297fb8982b8 100644 --- a/trunk/fs/ocfs2/inode.c +++ b/trunk/fs/ocfs2/inode.c @@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { status = ocfs2_try_open_lock(inode, 0); if (status) { - make_bad_inode(inode); + make_bad_inode(inode); return status; } } @@ -684,7 +684,7 @@ static int ocfs2_remove_inode(struct inode *inode, return status; } -/* +/* * Serialize with orphan dir recovery. If the process doing * recovery on this orphan dir does an iget() with the dir * i_mutex held, we'll deadlock here. Instead we detect this diff --git a/trunk/fs/ocfs2/ioctl.c b/trunk/fs/ocfs2/ioctl.c index 7d9d9c132cef..31fbb0619510 100644 --- a/trunk/fs/ocfs2/ioctl.c +++ b/trunk/fs/ocfs2/ioctl.c @@ -7,7 +7,6 @@ #include #include -#include #define MLOG_MASK_PREFIX ML_INODE #include @@ -182,10 +181,6 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - bool preserve; - struct reflink_arguments args; - struct inode *inode = file->f_path.dentry->d_inode; - switch (cmd) { case OCFS2_IOC32_GETFLAGS: cmd = OCFS2_IOC_GETFLAGS; @@ -200,15 +195,8 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case OCFS2_IOC_GROUP_EXTEND: case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: - break; case OCFS2_IOC_REFLINK: - if (copy_from_user(&args, (struct reflink_arguments *)arg, - sizeof(args))) - return -EFAULT; - preserve = (args.preserve != 0); - - return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path), - compat_ptr(args.new_path), preserve); + break; default: return -ENOIOCTLCMD; } diff --git a/trunk/fs/ocfs2/journal.c b/trunk/fs/ocfs2/journal.c index 9336c60e3a36..bf34c491ae96 100644 --- a/trunk/fs/ocfs2/journal.c +++ b/trunk/fs/ocfs2/journal.c @@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, status = -ENOENT; mlog_errno(status); return status; - } + } mutex_lock(&orphan_dir_inode->i_mutex); status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); diff --git a/trunk/fs/ocfs2/ocfs2.h b/trunk/fs/ocfs2/ocfs2.h index 740f448041e2..9362eea7424b 100644 --- a/trunk/fs/ocfs2/ocfs2.h +++ b/trunk/fs/ocfs2/ocfs2.h @@ -136,10 +136,6 @@ enum ocfs2_unlock_action { #define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a call to dlm_lock. Only exists with BUSY set. 
-#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
-						      * from downconverting
-						      * before the upconvert
-						      * has completed */
 
 struct ocfs2_lock_res_ops;
diff --git a/trunk/fs/ocfs2/ocfs2_fs.h b/trunk/fs/ocfs2/ocfs2_fs.h
index 7638a38c32bc..1a1a679e51b5 100644
--- a/trunk/fs/ocfs2/ocfs2_fs.h
+++ b/trunk/fs/ocfs2/ocfs2_fs.h
@@ -1417,16 +1417,9 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
 	return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
 }
 
-static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
-						   struct ocfs2_dinode *di)
+static inline int ocfs2_max_inline_data(int blocksize)
 {
-	if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
-		return blocksize -
-			offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
-			di->i_xattr_inline_size;
-	else
-		return blocksize -
-			offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+	return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
 }
 
 static inline int ocfs2_extent_recs_per_inode(int blocksize)
diff --git a/trunk/fs/ocfs2/refcounttree.c b/trunk/fs/ocfs2/refcounttree.c
index 8ae65c9c020c..74db2be75dd6 100644
--- a/trunk/fs/ocfs2/refcounttree.c
+++ b/trunk/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
 	while (offset < end) {
 		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+		map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
@@ -2957,12 +2957,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
 		page = grab_cache_page(mapping, page_index);
 
-		/*
-		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
-		 * can't be dirtied before we CoW it out.
-		 */
-		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
-			BUG_ON(PageDirty(page));
+		/* This page can't be dirtied before we CoW it out. */
+		BUG_ON(PageDirty(page));
 
 		if (!PageUptodate(page)) {
 			ret = block_read_full_page(page, ocfs2_get_block);
@@ -3174,7 +3170,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
 
 	while (offset < end) {
 		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+		map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
 		if (map_end > end)
 			map_end = end;
diff --git a/trunk/fs/ocfs2/stack_o2cb.c b/trunk/fs/ocfs2/stack_o2cb.c
index 3038c92af493..e49c41050264 100644
--- a/trunk/fs/ocfs2/stack_o2cb.c
+++ b/trunk/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
 	u32 dlm_key;
 	struct dlm_ctxt *dlm;
 	struct o2dlm_private *priv;
-	struct dlm_protocol_version fs_version;
+	struct dlm_protocol_version dlm_version;
 
 	BUG_ON(conn == NULL);
 	BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
 
 	/* used by the dlm code to make message headers unique, each
 	 * node in this domain must agree on this.
 	 */
 	dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
 
-	fs_version.pv_major = conn->cc_version.pv_major;
-	fs_version.pv_minor = conn->cc_version.pv_minor;
+	dlm_version.pv_major = conn->cc_version.pv_major;
+	dlm_version.pv_minor = conn->cc_version.pv_minor;
 
-	dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
+	dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
 	if (IS_ERR(dlm)) {
 		rc = PTR_ERR(dlm);
 		mlog_errno(rc);
 		goto out_free;
 	}
 
-	conn->cc_version.pv_major = fs_version.pv_major;
-	conn->cc_version.pv_minor = fs_version.pv_minor;
+	conn->cc_version.pv_major = dlm_version.pv_major;
+	conn->cc_version.pv_minor = dlm_version.pv_minor;
 	conn->cc_lockspace = dlm;
 
 	dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/trunk/fs/ocfs2/super.c b/trunk/fs/ocfs2/super.c
index 755cd49a5ef3..26069917a9f5 100644
--- a/trunk/fs/ocfs2/super.c
+++ b/trunk/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 			     "file system, but write access is "
 			     "unavailable.\n");
 		else
-			mlog_errno(status);
+			mlog_errno(status); 
 		goto read_super_error;
 	}
diff --git a/trunk/fs/ocfs2/symlink.c b/trunk/fs/ocfs2/symlink.c
index 32499d213fc4..49b133ccbf11 100644
--- a/trunk/fs/ocfs2/symlink.c
+++ b/trunk/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
 	}
 
 	memcpy(link, target, len);
+	nd_set_link(nd, link);
 
 bail:
-	nd_set_link(nd, status ? ERR_PTR(status) : link);
 	brelse(bh);
 	mlog_exit(status);
-	return NULL;
+	return status ? ERR_PTR(status) : link;
 }
 
 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd,
 				void *cookie)
 {
-	char *link = nd_get_link(nd);
-	if (!IS_ERR(link))
-		kfree(link);
+	char *link = cookie;
+
+	kfree(link);
 }
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/trunk/fs/ocfs2/uptodate.c b/trunk/fs/ocfs2/uptodate.c
index a0a120e82b97..c61369342a27 100644
--- a/trunk/fs/ocfs2/uptodate.c
+++ b/trunk/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
 }
 
 /* Warning: even if it returns true, this does *not* guarantee that
- * the block is stored in our inode metadata cache.
- *
+ * the block is stored in our inode metadata cache. 
+ * 
  * This can be called under lock_buffer()
  */
 int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/trunk/net/9p/client.c b/trunk/net/9p/client.c
index 09d4f1e2e4a8..8af95b2dddd6 100644
--- a/trunk/net/9p/client.c
+++ b/trunk/net/9p/client.c
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
 
 static int parse_opts(char *opts, struct p9_client *clnt)
 {
-	char *options, *tmp_options;
+	char *options;
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
@@ -81,13 +81,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 	if (!opts)
 		return 0;
 
-	tmp_options = kstrdup(opts, GFP_KERNEL);
-	if (!tmp_options) {
+	options = kstrdup(opts, GFP_KERNEL);
+	if (!options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
-	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -109,13 +108,6 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 			break;
 		case Opt_trans:
 			clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
-			if(clnt->trans_mod == NULL) {
-				P9_DPRINTK(P9_DEBUG_ERROR,
-				   "Could not find request transport: %s\n",
-				   (char *) &args[0]);
-				ret = -EINVAL;
-				goto free_and_return;
-			}
 			break;
 		case Opt_legacy:
 			clnt->dotu = 0;
@@ -125,8 +117,7 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 		}
 	}
 
-free_and_return:
-	kfree(tmp_options);
+	kfree(options);
 	return ret;
 }
 
@@ -676,12 +667,18 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	clnt->trans = NULL;
 	spin_lock_init(&clnt->lock);
 	INIT_LIST_HEAD(&clnt->fidlist);
+	clnt->fidpool = p9_idpool_create();
+	if (IS_ERR(clnt->fidpool)) {
+		err = PTR_ERR(clnt->fidpool);
+		clnt->fidpool = NULL;
+		goto error;
+	}
 
 	p9_tag_init(clnt);
 
 	err = parse_opts(options, clnt);
 	if (err < 0)
-		goto free_client;
+		goto error;
 
 	if (!clnt->trans_mod)
 		clnt->trans_mod = v9fs_get_default_trans();
@@ -690,14 +687,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 		err = -EPROTONOSUPPORT;
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "No transport defined or default transport\n");
-		goto free_client;
-	}
-
-	clnt->fidpool = p9_idpool_create();
-	if (IS_ERR(clnt->fidpool)) {
-		err = PTR_ERR(clnt->fidpool);
-		clnt->fidpool = NULL;
-		goto put_trans;
+		goto error;
 	}
 
 	P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -705,25 +695,19 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
 	err = clnt->trans_mod->create(clnt, dev_name, options);
 	if (err)
-		goto destroy_fidpool;
+		goto error;
 
 	if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
 		clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
 
 	err = p9_client_version(clnt);
 	if (err)
-		goto close_trans;
+		goto error;
 
 	return clnt;
 
-close_trans:
-	clnt->trans_mod->close(clnt);
-destroy_fidpool:
-	p9_idpool_destroy(clnt->fidpool);
-put_trans:
-	v9fs_put_trans(clnt->trans_mod);
-free_client:
-	kfree(clnt);
+error:
+	p9_client_destroy(clnt);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_create);
@@ -1230,11 +1214,10 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
 {
 	int ret;
 
-	/* NOTE: size shouldn't include its own length */
 	/* size[2] type[2] dev[4] qid[13] */
 	/* mode[4] atime[4] mtime[4] length[8]*/
 	/* name[s] uid[s] gid[s] muid[s] */
-	ret = 2+4+13+4+4+4+8+2+2+2+2;
+	ret = 2+2+4+13+4+4+4+8+2+2+2+2;
 
 	if (wst->name)
 		ret += strlen(wst->name);
@@ -1275,7 +1258,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 		wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
 		wst->n_uid, wst->n_gid, wst->n_muid);
 
-	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
+	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto error;
diff --git a/trunk/net/9p/trans_fd.c b/trunk/net/9p/trans_fd.c
index 31d0b05582a9..be1cb909d8c0 100644
--- a/trunk/net/9p/trans_fd.c
+++ b/trunk/net/9p/trans_fd.c
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
-	char *options, *tmp_options;
+	char *options;
 	int ret;
 
 	opts->port = P9_PORT;
@@ -724,13 +724,12 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	if (!params)
 		return 0;
 
-	tmp_options = kstrdup(params, GFP_KERNEL);
-	if (!tmp_options) {
+	options = kstrdup(params, GFP_KERNEL);
+	if (!options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
-	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -761,8 +760,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 			continue;
 		}
 	}
-
-	kfree(tmp_options);
+	kfree(options);
 	return 0;
 }
diff --git a/trunk/net/9p/trans_rdma.c b/trunk/net/9p/trans_rdma.c
index 2c95a89c0f46..65cb29db03f8 100644
--- a/trunk/net/9p/trans_rdma.c
+++ b/trunk/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
-	char *options, *tmp_options;
+	char *options;
 	int ret;
 
 	opts->port = P9_PORT;
@@ -177,13 +177,12 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	if (!params)
 		return 0;
 
-	tmp_options = kstrdup(params, GFP_KERNEL);
-	if (!tmp_options) {
+	options = kstrdup(params, GFP_KERNEL);
+	if (!options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
-	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -217,7 +216,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	}
 	/* RQ must be at least as large as the SQ */
 	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
-	kfree(tmp_options);
+	kfree(options);
 	return 0;
 }
diff --git a/trunk/net/9p/trans_virtio.c b/trunk/net/9p/trans_virtio.c
index cb50f4ae5eef..ea1e3daabefe 100644
--- a/trunk/net/9p/trans_virtio.c
+++ b/trunk/net/9p/trans_virtio.c
@@ -102,8 +102,7 @@ static void p9_virtio_close(struct p9_client *client)
 	struct virtio_chan *chan = client->trans;
 
 	mutex_lock(&virtio_9p_lock);
-	if (chan)
-		chan->inuse = false;
+	chan->inuse = false;
 	mutex_unlock(&virtio_9p_lock);
 }
 
@@ -312,7 +311,6 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 	}
 
 	client->trans = (void *)chan;
-	client->status = Connected;
 	chan->client = client;
 
 	return 0;