Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 32825
b: refs/heads/master
c: 083edca
h: refs/heads/master
i:
  32823: f389a7d
v: v3
  • Loading branch information
Patrick McHardy authored and David S. Miller committed Jul 25, 2006
1 parent 9af4dfc commit 8c3da8d
Show file tree
Hide file tree
Showing 10 changed files with 74 additions and 72 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 153d7f3fcae7ed4e19328549aa9467acdfbced10
refs/heads/master: 083edca05ab1fa6efac1ba414018f7f45a4a83ff
2 changes: 1 addition & 1 deletion trunk/block/blktrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
#define trace_sync_bit(rw) \
(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
#define trace_ahead_bit(rw) \
(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
(((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0))

/*
* The worker for the various blk_add_trace*() types. Fills out a
Expand Down
2 changes: 1 addition & 1 deletion trunk/block/cfq-iosched.c
Original file line number Diff line number Diff line change
Expand Up @@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* seeks. so allow a little bit of time for him to submit a new rq
*/
if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
sl = min(sl, msecs_to_jiffies(2));
sl = 2;

mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
return 1;
Expand Down
86 changes: 41 additions & 45 deletions trunk/drivers/block/cciss.c
Original file line number Diff line number Diff line change
Expand Up @@ -1233,50 +1233,6 @@ static inline void complete_buffers(struct bio *bio, int status)
}
}

/*
 * cciss_check_queues - restart per-disk block request queues when the
 * controller has room for more commands.
 *
 * Walks the logical drives round-robin starting at h->next_to_run so that
 * every queue gets a fair chance to be started first, calling
 * blk_start_queue() on each real, added disk.  Stops (and records where to
 * resume next time in h->next_to_run) as soon as the command pool is full.
 *
 * NOTE(review): caller is expected to hold the appropriate controller lock —
 * h->cmd_pool_bits and h->next_to_run are read/written without locking here;
 * confirm against the call site (cciss_softirq_done under h->lock).
 */
static void cciss_check_queues(ctlr_info_t *h)
{
int start_queue = h->next_to_run;
int i;

/* check to see if we have maxed out the number of commands that can
* be placed on the queue. If so then exit. We do this check here
* in case the interrupt we serviced was from an ioctl and did not
* free any new commands.
*/
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
return;

/* We have room on the queue for more commands. Now we need to queue
* them up. We will also keep track of the next queue to run so
* that every queue gets a chance to be started first.
*/
for (i = 0; i < h->highest_lun + 1; i++) {
int curr_queue = (start_queue + i) % (h->highest_lun + 1);
/* make sure the disk has been added and the drive is real
* because this can be called from the middle of init_one.
*/
if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
continue;
blk_start_queue(h->gendisk[curr_queue]->queue);

/* check to see if we have maxed out the number of commands
* that can be placed on the queue.
*/
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
/* Pool is full: remember where to resume.  If we filled up
* on the very first queue we tried, advance past it so it
* does not always go first.
*/
if (curr_queue == start_queue) {
h->next_to_run =
(start_queue + 1) % (h->highest_lun + 1);
break;
} else {
h->next_to_run = curr_queue;
break;
}
} else {
/* NOTE(review): this increment is dead code — curr_queue is
* recomputed from i at the top of the loop on the next
* iteration, so this assignment has no effect.
*/
curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
}
}
}

static void cciss_softirq_done(struct request *rq)
{
CommandList_struct *cmd = rq->completion_data;
Expand Down Expand Up @@ -1308,7 +1264,6 @@ static void cciss_softirq_done(struct request *rq)
spin_lock_irqsave(&h->lock, flags);
end_that_request_last(rq, rq->errors);
cmd_free(h, cmd, 1);
cciss_check_queues(h);
spin_unlock_irqrestore(&h->lock, flags);
}

Expand Down Expand Up @@ -2573,6 +2528,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
CommandList_struct *c;
unsigned long flags;
__u32 a, a1, a2;
int j;
int start_queue = h->next_to_run;

if (interrupt_not_for_us(h))
return IRQ_NONE;
Expand Down Expand Up @@ -2631,6 +2588,45 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
}
}

/* check to see if we have maxed out the number of commands that can
* be placed on the queue. If so then exit. We do this check here
* in case the interrupt we serviced was from an ioctl and did not
* free any new commands.
*/
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
goto cleanup;

/* We have room on the queue for more commands. Now we need to queue
* them up. We will also keep track of the next queue to run so
* that every queue gets a chance to be started first.
*/
for (j = 0; j < h->highest_lun + 1; j++) {
int curr_queue = (start_queue + j) % (h->highest_lun + 1);
/* make sure the disk has been added and the drive is real
* because this can be called from the middle of init_one.
*/
if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
continue;
blk_start_queue(h->gendisk[curr_queue]->queue);

/* check to see if we have maxed out the number of commands
* that can be placed on the queue.
*/
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
if (curr_queue == start_queue) {
h->next_to_run =
(start_queue + 1) % (h->highest_lun + 1);
goto cleanup;
} else {
h->next_to_run = curr_queue;
goto cleanup;
}
} else {
curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
}
}

cleanup:
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
return IRQ_HANDLED;
}
Expand Down
40 changes: 22 additions & 18 deletions trunk/drivers/cpufreq/cpufreq.c
Original file line number Diff line number Diff line change
Expand Up @@ -364,12 +364,10 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
lock_cpu_hotplug(); \
mutex_lock(&policy->lock); \
ret = __cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
mutex_unlock(&policy->lock); \
unlock_cpu_hotplug(); \
\
return ret ? ret : count; \
}
Expand Down Expand Up @@ -1199,18 +1197,20 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
*********************************************************************/


/* Must be called with lock_cpu_hotplug held */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int retval = -EINVAL;

lock_cpu_hotplug();
dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);

unlock_cpu_hotplug();

return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
Expand All @@ -1225,23 +1225,17 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
if (!policy)
return -EINVAL;

lock_cpu_hotplug();
mutex_lock(&policy->lock);

ret = __cpufreq_driver_target(policy, target_freq, relation);

mutex_unlock(&policy->lock);
unlock_cpu_hotplug();

cpufreq_cpu_put(policy);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

/*
* Locking: Must be called with the lock_cpu_hotplug() lock held
* when "event" is CPUFREQ_GOV_LIMITS
*/

static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
{
Expand All @@ -1263,6 +1257,24 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
}


/**
 * cpufreq_governor - deliver a governor event to the policy owning @cpu.
 * @cpu:   CPU whose cpufreq policy should receive the event.
 * @event: governor event code (e.g. CPUFREQ_GOV_* as used by
 *         __cpufreq_governor(); exact set defined elsewhere in cpufreq.h).
 *
 * Looks up the policy via cpufreq_cpu_get() (which takes a reference),
 * forwards the event to __cpufreq_governor() under policy->lock, then
 * drops the reference with cpufreq_cpu_put().
 *
 * Returns -EINVAL if no policy exists for @cpu, otherwise the result of
 * __cpufreq_governor().
 */
int cpufreq_governor(unsigned int cpu, unsigned int event)
{
int ret = 0;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

if (!policy)
return -EINVAL;

/* Serialize against other policy operations on this CPU. */
mutex_lock(&policy->lock);
ret = __cpufreq_governor(policy, event);
mutex_unlock(&policy->lock);

cpufreq_cpu_put(policy);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor);


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
struct cpufreq_governor *t;
Expand Down Expand Up @@ -1330,9 +1342,6 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
EXPORT_SYMBOL(cpufreq_get_policy);


/*
* Locking: Must be called with the lock_cpu_hotplug() lock held
*/
static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
{
int ret = 0;
Expand Down Expand Up @@ -1427,8 +1436,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
if (!data)
return -EINVAL;

lock_cpu_hotplug();

/* lock this CPU */
mutex_lock(&data->lock);

Expand All @@ -1439,8 +1446,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
data->user_policy.governor = data->governor;

mutex_unlock(&data->lock);

unlock_cpu_hotplug();
cpufreq_cpu_put(data);

return ret;
Expand All @@ -1464,7 +1469,6 @@ int cpufreq_update_policy(unsigned int cpu)
if (!data)
return -ENODEV;

lock_cpu_hotplug();
mutex_lock(&data->lock);

dprintk("updating policy for CPU %u\n", cpu);
Expand All @@ -1490,7 +1494,7 @@ int cpufreq_update_policy(unsigned int cpu)
ret = __cpufreq_set_policy(data, &policy);

mutex_unlock(&data->lock);
unlock_cpu_hotplug();

cpufreq_cpu_put(data);
return ret;
}
Expand Down
2 changes: 2 additions & 0 deletions trunk/drivers/cpufreq/cpufreq_conservative.c
Original file line number Diff line number Diff line change
Expand Up @@ -525,6 +525,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;

case CPUFREQ_GOV_LIMITS:
lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
Expand All @@ -535,6 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
unlock_cpu_hotplug();
break;
}
return 0;
Expand Down
4 changes: 2 additions & 2 deletions trunk/drivers/cpufreq/cpufreq_ondemand.c
Original file line number Diff line number Diff line change
Expand Up @@ -309,9 +309,7 @@ static void do_dbs_timer(void *data)
if (!dbs_info->enable)
return;

lock_cpu_hotplug();
dbs_check_cpu(dbs_info);
unlock_cpu_hotplug();
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}
Expand Down Expand Up @@ -414,6 +412,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;

case CPUFREQ_GOV_LIMITS:
lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(this_dbs_info->cur_policy,
Expand All @@ -424,6 +423,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
policy->min,
CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
unlock_cpu_hotplug();
break;
}
return 0;
Expand Down
3 changes: 0 additions & 3 deletions trunk/drivers/cpufreq/cpufreq_userspace.c
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
Expand Down Expand Up @@ -71,7 +70,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)

dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);

lock_cpu_hotplug();
mutex_lock(&userspace_mutex);
if (!cpu_is_managed[policy->cpu])
goto err;
Expand All @@ -94,7 +92,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)

err:
mutex_unlock(&userspace_mutex);
unlock_cpu_hotplug();
return ret;
}

Expand Down
3 changes: 3 additions & 0 deletions trunk/include/linux/cpufreq.h
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,9 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);


/* pass an event to the cpufreq governor */
int cpufreq_governor(unsigned int cpu, unsigned int event);

int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);

Expand Down
2 changes: 1 addition & 1 deletion trunk/net/ipv4/netfilter/ip_conntrack_helper_h323.c
Original file line number Diff line number Diff line change
Expand Up @@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
tuple.dst.protonum = IPPROTO_TCP;

exp = __ip_conntrack_expect_find(&tuple);
if (exp->master == ct)
if (exp && exp->master == ct)
return exp;
return NULL;
}
Expand Down

0 comments on commit 8c3da8d

Please sign in to comment.