Staging: batman-adv: Use forw_bcast_list_lock always with disabled interrupts

forw_bcast_list_lock is spin_locked in both process and SoftIRQ context.
SoftIRQ context takes the spinlock with IRQs already disabled, while
normal process context takes it with IRQs enabled.

When process context is inside a spin_locked area protected by
forw_bcast_list_lock and gets interrupted by an IRQ, something may try
to lock forw_bcast_list_lock again in SoftIRQ context. That attempt
cannot proceed, since the lock is already taken elsewhere, and no
reschedule will happen inside the SoftIRQ context. This leads to a
complete kernel hang with no chance of resurrection.

All functions called in process context must therefore disable IRQs when
they take that lock, to prevent any reschedule due to IRQs.

Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Acked-by: Marek Lindner <lindner_marek@yahoo.de>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
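
To make the failure mode concrete, here is a minimal sketch of the broken
and the fixed locking pattern. It is not code from the driver; example_lock
and all three functions are hypothetical names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	/* Process context, BROKEN: IRQs stay enabled while the lock is held. */
	static void broken_process_path(void)
	{
		spin_lock(&example_lock);
		/* If an IRQ fires here and its SoftIRQ half runs
		 * softirq_path() on this CPU, that spin_lock() spins
		 * forever: the lock holder never gets scheduled again,
		 * and the machine hangs. */
		spin_unlock(&example_lock);
	}

	/* Process context, FIXED: disable IRQs around the critical section. */
	static void fixed_process_path(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* No IRQ, and therefore no SoftIRQ, can interrupt us here. */
		spin_unlock_irqrestore(&example_lock, flags);
	}

	/* SoftIRQ context: may run as soon as an interrupt returns. */
	static void softirq_path(void)
	{
		spin_lock(&example_lock);
		/* ... work on the shared list ... */
		spin_unlock(&example_lock);
	}

spin_lock_irqsave() saves the current IRQ state into flags and disables
IRQs on the local CPU, so the process-context critical section can no
longer be interrupted; spin_unlock_irqrestore() restores the saved state,
which is why it is safe even in paths where IRQs might already be off.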
Sven Eckelmann authored and Greg Kroah-Hartman committed Mar 4, 2010
1 parent 5ea84fa commit cec4a69
Showing 1 changed file with 11 additions and 8 deletions.
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -338,12 +338,13 @@ static void forw_packet_free(struct forw_packet *forw_packet)
 static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
 				      unsigned long send_time)
 {
+	unsigned long flags;
 	INIT_HLIST_NODE(&forw_packet->list);
 
 	/* add new packet to packet list */
-	spin_lock(&forw_bcast_list_lock);
+	spin_lock_irqsave(&forw_bcast_list_lock, flags);
 	hlist_add_head(&forw_packet->list, &forw_bcast_list);
-	spin_unlock(&forw_bcast_list_lock);
+	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
 	/* start timer for this packet */
 	INIT_DELAYED_WORK(&forw_packet->delayed_work,
@@ -382,10 +383,11 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 		container_of(work, struct delayed_work, work);
 	struct forw_packet *forw_packet =
 		container_of(delayed_work, struct forw_packet, delayed_work);
+	unsigned long flags;
 
-	spin_lock(&forw_bcast_list_lock);
+	spin_lock_irqsave(&forw_bcast_list_lock, flags);
 	hlist_del(&forw_packet->list);
-	spin_unlock(&forw_bcast_list_lock);
+	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
 	/* rebroadcast packet */
 	rcu_read_lock();
@@ -436,24 +438,25 @@ void purge_outstanding_packets(void)
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
+	unsigned long flags;
 
 	bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");
 
 	/* free bcast list */
-	spin_lock(&forw_bcast_list_lock);
+	spin_lock_irqsave(&forw_bcast_list_lock, flags);
 	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
 				  &forw_bcast_list, list) {
 
-		spin_unlock(&forw_bcast_list_lock);
+		spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
 		/**
 		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
 		 */
 		cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock(&forw_bcast_list_lock);
+		spin_lock_irqsave(&forw_bcast_list_lock, flags);
 	}
-	spin_unlock(&forw_bcast_list_lock);
+	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
 	/* free batman packet list */
 	spin_lock(&forw_bat_list_lock);
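
One detail worth noting in the last hunk: cancel_delayed_work_sync() waits
for a pending send_outstanding_bcast_packet() to finish, and that handler
itself takes forw_bcast_list_lock to unlink the packet, so the purge loop
has to drop the lock around each cancellation. A sketch of the same pattern
with hypothetical names (pending_lock, pending_list, struct pending_item),
using the five-argument hlist_for_each_entry_safe() form of this kernel era,
as the diff does:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* Hypothetical sketch mirroring purge_outstanding_packets(). */
	struct pending_item {
		struct hlist_node list;
		struct delayed_work delayed_work;
	};

	static DEFINE_SPINLOCK(pending_lock);
	static HLIST_HEAD(pending_list);

	static void purge_pending(void)
	{
		struct pending_item *item;
		struct hlist_node *pos, *n;
		unsigned long flags;

		spin_lock_irqsave(&pending_lock, flags);
		hlist_for_each_entry_safe(item, pos, n, &pending_list, list) {
			/* The work handler takes pending_lock to unlink
			 * itself, so waiting for it with the lock held
			 * would deadlock: drop the lock across the
			 * synchronous cancel, then retake it. */
			spin_unlock_irqrestore(&pending_lock, flags);
			cancel_delayed_work_sync(&item->delayed_work);
			spin_lock_irqsave(&pending_lock, flags);
		}
		spin_unlock_irqrestore(&pending_lock, flags);
	}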
