diff --git a/[refs] b/[refs]
index 53147f867e8c..c0c5c75c7fea 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 153d7f3fcae7ed4e19328549aa9467acdfbced10
+refs/heads/master: 083edca05ab1fa6efac1ba414018f7f45a4a83ff
diff --git a/trunk/block/blktrace.c b/trunk/block/blktrace.c
index 265f7a830619..b8c0702777ff 100644
--- a/trunk/block/blktrace.c
+++ b/trunk/block/blktrace.c
@@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
 #define trace_sync_bit(rw)	\
 	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
 #define trace_ahead_bit(rw)	\
-	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
+	(((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c
index aae3123bf3ee..102ebc2c5c34 100644
--- a/trunk/block/cfq-iosched.c
+++ b/trunk/block/cfq-iosched.c
@@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * seeks. so allow a little bit of time for him to submit a new rq
 	 */
 	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
-		sl = min(sl, msecs_to_jiffies(2));
+		sl = 2;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	return 1;
diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c
index 7b0eca703a67..1c4df22dfd2a 100644
--- a/trunk/drivers/block/cciss.c
+++ b/trunk/drivers/block/cciss.c
@@ -1233,50 +1233,6 @@ static inline void complete_buffers(struct bio *bio, int status)
 	}
 }
 
-static void cciss_check_queues(ctlr_info_t *h)
-{
-	int start_queue = h->next_to_run;
-	int i;
-
-	/* check to see if we have maxed out the number of commands that can
-	 * be placed on the queue. If so then exit. We do this check here
-	 * in case the interrupt we serviced was from an ioctl and did not
-	 * free any new commands.
-	 */
-	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
-		return;
-
-	/* We have room on the queue for more commands. Now we need to queue
-	 * them up. We will also keep track of the next queue to run so
-	 * that every queue gets a chance to be started first.
-	 */
-	for (i = 0; i < h->highest_lun + 1; i++) {
-		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
-		/* make sure the disk has been added and the drive is real
-		 * because this can be called from the middle of init_one.
-		 */
-		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
-			continue;
-		blk_start_queue(h->gendisk[curr_queue]->queue);
-
-		/* check to see if we have maxed out the number of commands
-		 * that can be placed on the queue.
-		 */
-		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
-			if (curr_queue == start_queue) {
-				h->next_to_run =
-				    (start_queue + 1) % (h->highest_lun + 1);
-				break;
-			} else {
-				h->next_to_run = curr_queue;
-				break;
-			}
-		} else {
-			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
-		}
-	}
-}
-
 static void cciss_softirq_done(struct request *rq)
 {
 	CommandList_struct *cmd = rq->completion_data;
@@ -1308,7 +1264,6 @@ static void cciss_softirq_done(struct request *rq)
 	spin_lock_irqsave(&h->lock, flags);
 	end_that_request_last(rq, rq->errors);
 	cmd_free(h, cmd, 1);
-	cciss_check_queues(h);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 
@@ -2573,6 +2528,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	CommandList_struct *c;
 	unsigned long flags;
 	__u32 a, a1, a2;
+	int j;
+	int start_queue = h->next_to_run;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
@@ -2631,6 +2588,45 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 		}
 	}
 
+	/* check to see if we have maxed out the number of commands that can
+	 * be placed on the queue. If so then exit. We do this check here
+	 * in case the interrupt we serviced was from an ioctl and did not
+	 * free any new commands.
+	 */
+	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+		goto cleanup;
+
+	/* We have room on the queue for more commands. Now we need to queue
+	 * them up. We will also keep track of the next queue to run so
+	 * that every queue gets a chance to be started first.
+	 */
+	for (j = 0; j < h->highest_lun + 1; j++) {
+		int curr_queue = (start_queue + j) % (h->highest_lun + 1);
+		/* make sure the disk has been added and the drive is real
+		 * because this can be called from the middle of init_one.
+		 */
+		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
+			continue;
+		blk_start_queue(h->gendisk[curr_queue]->queue);
+
+		/* check to see if we have maxed out the number of commands
+		 * that can be placed on the queue.
+		 */
+		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+			if (curr_queue == start_queue) {
+				h->next_to_run =
+				    (start_queue + 1) % (h->highest_lun + 1);
+				goto cleanup;
+			} else {
+				h->next_to_run = curr_queue;
+				goto cleanup;
+			}
+		} else {
+			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
+		}
+	}
+
+cleanup:
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c
index bc1088d9b379..8d328186f774 100644
--- a/trunk/drivers/cpufreq/cpufreq.c
+++ b/trunk/drivers/cpufreq/cpufreq.c
@@ -364,12 +364,10 @@ static ssize_t store_##file_name					\
 	if (ret != 1)							\
 		return -EINVAL;						\
 									\
-	lock_cpu_hotplug();						\
 	mutex_lock(&policy->lock);					\
 	ret = __cpufreq_set_policy(policy, &new_policy);		\
 	policy->user_policy.object = policy->object;			\
 	mutex_unlock(&policy->lock);					\
-	unlock_cpu_hotplug();						\
 									\
 	return ret ? ret : count;					\
 }
@@ -1199,18 +1197,20 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 *********************************************************************/
 
 
-/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
 
+	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
+	unlock_cpu_hotplug();
+
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,23 +1225,17 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
-	lock_cpu_hotplug();
 	mutex_lock(&policy->lock);
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
 	mutex_unlock(&policy->lock);
-	unlock_cpu_hotplug();
 
 	cpufreq_cpu_put(policy);
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-/*
- * Locking: Must be called with the lock_cpu_hotplug() lock held
- * when "event" is CPUFREQ_GOV_LIMITS
- */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 {
@@ -1263,6 +1257,24 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 }
 
+int cpufreq_governor(unsigned int cpu, unsigned int event)
+{
+	int ret = 0;
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+	if (!policy)
+		return -EINVAL;
+
+	mutex_lock(&policy->lock);
+	ret = __cpufreq_governor(policy, event);
+	mutex_unlock(&policy->lock);
+
+	cpufreq_cpu_put(policy);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_governor);
+
+
 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
 	struct cpufreq_governor *t;
@@ -1330,9 +1342,6 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 
-/*
- * Locking: Must be called with the lock_cpu_hotplug() lock held
- */
 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
 {
 	int ret = 0;
@@ -1427,8 +1436,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	if (!data)
 		return -EINVAL;
 
-	lock_cpu_hotplug();
-
 	/* lock this CPU */
 	mutex_lock(&data->lock);
 
@@ -1439,8 +1446,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.governor = data->governor;
 
 	mutex_unlock(&data->lock);
-
-	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1464,7 +1469,6 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
-	lock_cpu_hotplug();
 	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
@@ -1490,7 +1494,7 @@ int cpufreq_update_policy(unsigned int cpu)
 
 	ret = __cpufreq_set_policy(data, &policy);
 	mutex_unlock(&data->lock);
-	unlock_cpu_hotplug();
+
 	cpufreq_cpu_put(data);
 	return ret;
 }
diff --git a/trunk/drivers/cpufreq/cpufreq_conservative.c b/trunk/drivers/cpufreq/cpufreq_conservative.c
index c4c578defabf..b3ebc8f01975 100644
--- a/trunk/drivers/cpufreq/cpufreq_conservative.c
+++ b/trunk/drivers/cpufreq/cpufreq_conservative.c
@@ -525,6 +525,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -535,6 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
 }
diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c
index 52cf1f021825..178f0c547eb7 100644
--- a/trunk/drivers/cpufreq/cpufreq_ondemand.c
+++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c
@@ -309,9 +309,7 @@ static void do_dbs_timer(void *data)
 	if (!dbs_info->enable)
 		return;
 
-	lock_cpu_hotplug();
 	dbs_check_cpu(dbs_info);
-	unlock_cpu_hotplug();
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
@@ -414,6 +412,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -424,6 +423,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 						   policy->min,
 						   CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
 }
diff --git a/trunk/drivers/cpufreq/cpufreq_userspace.c b/trunk/drivers/cpufreq/cpufreq_userspace.c
index a06c204589cd..44ae5e5b94cf 100644
--- a/trunk/drivers/cpufreq/cpufreq_userspace.c
+++ b/trunk/drivers/cpufreq/cpufreq_userspace.c
@@ -18,7 +18,6 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
@@ -71,7 +70,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
-	lock_cpu_hotplug();
 	mutex_lock(&userspace_mutex);
 	if (!cpu_is_managed[policy->cpu])
 		goto err;
@@ -94,7 +92,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
 err:
 	mutex_unlock(&userspace_mutex);
-	unlock_cpu_hotplug();
 	return ret;
 }
diff --git a/trunk/include/linux/cpufreq.h b/trunk/include/linux/cpufreq.h
index 4ea39fee99c7..35e137636b0b 100644
--- a/trunk/include/linux/cpufreq.h
+++ b/trunk/include/linux/cpufreq.h
@@ -172,6 +172,9 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
 				   unsigned int target_freq,
 				   unsigned int relation);
 
+/* pass an event to the cpufreq governor */
+int cpufreq_governor(unsigned int cpu, unsigned int event);
+
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
diff --git a/trunk/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/trunk/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index af35235672d5..9a39e2969712 100644
--- a/trunk/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/trunk/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
 	tuple.dst.protonum = IPPROTO_TCP;
 
 	exp = __ip_conntrack_expect_find(&tuple);
-	if (exp->master == ct)
+	if (exp && exp->master == ct)
 		return exp;
 	return NULL;
 }