Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 55952
b: refs/heads/master
c: 294cc44
h: refs/heads/master
v: v3
  • Loading branch information
Herbert Xu authored and David S. Miller committed May 11, 2007
1 parent 2957ed7 commit 3bd0d04
Show file tree
Hide file tree
Showing 2 changed files with 68 additions and 24 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 572a103ded0ad880f75ce83e99f0512fbb80b5b0
refs/heads/master: 294cc44b7e48a6e7732499eebcf409b231460d8e
90 changes: 67 additions & 23 deletions trunk/net/core/link_watch.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,11 +77,52 @@ static void rfc2863_policy(struct net_device *dev)
}


/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
/*
 * An event is "urgent" (must not be rate-limited) when the interface
 * is up with carrier but its active qdisc has not yet been switched
 * to the sleeping one — i.e. a link-up that traffic is waiting on.
 */
static int linkwatch_urgent_event(struct net_device *dev)
{
	if (!netif_running(dev) || !netif_carrier_ok(dev))
		return 0;

	return dev->qdisc != dev->qdisc_sleeping;
}


/*
 * Push @dev onto the head of the singly linked list of devices with a
 * pending linkwatch event.  Callers must hold a reference on @dev; the
 * list is protected by lweventlist_lock (IRQ-safe, may be called from
 * any context).
 */
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lweventlist_lock, irqflags);
	dev->link_watch_next = lweventlist;
	lweventlist = dev;
	spin_unlock_irqrestore(&lweventlist_lock, irqflags);
}


/*
 * Schedule the linkwatch work to run after @delay jiffies, unless an
 * instance is already pending.  LW_RUNNING serialises scheduling so the
 * work is queued at most once at a time.
 */
static void linkwatch_schedule_work(unsigned long delay)
{
	/*
	 * A delay larger than HZ means the jiffies subtraction at the
	 * call site wrapped; run as soon as possible instead (this is
	 * off by at most HZ).  The clamp is pure, so computing it before
	 * the bit test does not change behaviour.
	 */
	unsigned long when = (delay > HZ) ? 0 : delay;

	if (test_and_set_bit(LW_RUNNING, &linkwatch_flags))
		return;

	schedule_delayed_work(&linkwatch_work, when);
}


static void __linkwatch_run_queue(int urgent_only)
{
struct net_device *next;

/*
* Limit the number of linkwatch events to one
* per second so that a runaway driver does not
* cause a storm of messages on the netlink
* socket. This limit does not apply to up events
* while the device qdisc is down.
*/
if (!urgent_only)
linkwatch_nextevent = jiffies + HZ;
clear_bit(LW_RUNNING, &linkwatch_flags);

spin_lock_irq(&lweventlist_lock);
next = lweventlist;
lweventlist = NULL;
Expand All @@ -92,6 +133,11 @@ void linkwatch_run_queue(void)

next = dev->link_watch_next;

if (urgent_only && !linkwatch_urgent_event(dev)) {
linkwatch_add_event(dev);
continue;
}

/*
* Make sure the above read is complete since it can be
* rewritten as soon as we clear the bit below.
Expand All @@ -116,45 +162,43 @@ void linkwatch_run_queue(void)

dev_put(dev);
}

if (lweventlist)
linkwatch_schedule_work(linkwatch_nextevent - jiffies);
}


static void linkwatch_event(struct work_struct *dummy)
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
/* Limit the number of linkwatch events to one
* per second so that a runaway driver does not
* cause a storm of messages on the netlink
* socket
*/
linkwatch_nextevent = jiffies + HZ;
clear_bit(LW_RUNNING, &linkwatch_flags);
__linkwatch_run_queue(0);
}


static void linkwatch_event(struct work_struct *dummy)
{
rtnl_lock();
linkwatch_run_queue();
__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
rtnl_unlock();
}


void linkwatch_fire_event(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
unsigned long flags;
unsigned long delay;

dev_hold(dev);

spin_lock_irqsave(&lweventlist_lock, flags);
dev->link_watch_next = lweventlist;
lweventlist = dev;
spin_unlock_irqrestore(&lweventlist_lock, flags);
linkwatch_add_event(dev);

if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
unsigned long delay = linkwatch_nextevent - jiffies;
delay = linkwatch_nextevent - jiffies;

/* If we wrap around we'll delay it by at most HZ. */
if (delay > HZ)
delay = 0;
schedule_delayed_work(&linkwatch_work, delay);
}
/* Minimise down-time: drop delay for up event. */
if (linkwatch_urgent_event(dev))
delay = 0;

linkwatch_schedule_work(delay);
}
}

Expand Down

0 comments on commit 3bd0d04

Please sign in to comment.