Commit 25410a2

---
r: 364377
b: refs/heads/master
c: 214ac7a
h: refs/heads/master
i:
  364375: 45d6b31
v: v3
Alan Stern authored and Greg Kroah-Hartman committed Mar 25, 2013
1 parent c29eaaf commit 25410a2
Showing 5 changed files with 61 additions and 47 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6e018751a35f6ef7ad04eb8006b5886b6a7c47f5
+refs/heads/master: 214ac7a0771d95d2f66d01bca5afeb2c9e8ac3c8
4 changes: 2 additions & 2 deletions trunk/drivers/usb/host/ehci-hcd.c
@@ -483,7 +483,7 @@ static int ehci_init(struct usb_hcd *hcd)
          */
         ehci->periodic_size = DEFAULT_I_TDPS;
         INIT_LIST_HEAD(&ehci->async_unlink);
-        INIT_LIST_HEAD(&ehci->async_iaa);
+        INIT_LIST_HEAD(&ehci->async_idle);
         INIT_LIST_HEAD(&ehci->intr_unlink);
         INIT_LIST_HEAD(&ehci->intr_qh_list);
         INIT_LIST_HEAD(&ehci->cached_itd_list);
@@ -752,7 +752,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
                 /* guard against (alleged) silicon errata */
                 if (cmd & CMD_IAAD)
                         ehci_dbg(ehci, "IAA with IAAD still set?\n");
-                if (!list_empty(&ehci->async_iaa))
+                if (ehci->iaa_in_progress)
                         COUNT(ehci->stats.iaa);
                 end_unlink_async(ehci);
         }
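The interrupt handler above now keys the IAA accounting off the new iaa_in_progress flag instead of testing whether the old async_iaa list is empty, so only IAA (Interrupt on Async Advance) events that the driver actually requested get counted. A minimal, compilable userspace sketch of that branch follows; the struct and names are stand-ins for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real driver state; only the fields this sketch needs. */
struct ehci_model {
        bool iaa_in_progress;   /* set by start_iaa_cycle(), cleared by end_unlink_async() */
        unsigned iaa_count;     /* plays the role of ehci->stats.iaa */
};

static void end_unlink_async(struct ehci_model *ehci)
{
        ehci->iaa_in_progress = false;  /* the IAA cycle (if any) has ended */
        /* real driver: advance unlinked QHs, maybe start another cycle */
}

/* Models the STS_IAA branch of ehci_irq(): count only IAAs we asked for. */
static void handle_iaa(struct ehci_model *ehci)
{
        if (ehci->iaa_in_progress)
                ehci->iaa_count++;
        end_unlink_async(ehci);
}

int main(void)
{
        struct ehci_model ehci = { .iaa_in_progress = true };

        handle_iaa(&ehci);      /* completes a requested cycle: counted */
        handle_iaa(&ehci);      /* spurious IAA with no cycle pending: not counted */
        printf("IAAs counted: %u\n", ehci.iaa_count);   /* prints 1 */
        return 0;
}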
97 changes: 55 additions & 42 deletions trunk/drivers/usb/host/ehci-q.c
@@ -960,7 +960,7 @@ static void disable_async(struct ehci_hcd *ehci)
 
         /* The async schedule and unlink lists are supposed to be empty */
         WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
-                        !list_empty(&ehci->async_iaa));
+                        !list_empty(&ehci->async_idle));
 
         /* Don't turn off the schedule until ASS is 1 */
         ehci_poll_ASS(ehci);
@@ -1164,41 +1164,19 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
                 ehci->qh_scan_next = qh->qh_next.qh;
 }
 
-static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
+static void start_iaa_cycle(struct ehci_hcd *ehci)
 {
-        /*
-         * Do nothing if an IAA cycle is already running or
-         * if one will be started shortly.
-         */
-        if (!list_empty(&ehci->async_iaa) || ehci->async_unlinking)
+        /* Do nothing if an IAA cycle is already running */
+        if (ehci->iaa_in_progress)
                 return;
+        ehci->iaa_in_progress = true;
 
         /* If the controller isn't running, we don't have to wait for it */
         if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
-
-                /* Do all the waiting QHs */
-                list_splice_tail_init(&ehci->async_unlink, &ehci->async_iaa);
-
-                if (!nested)            /* Avoid recursion */
-                        end_unlink_async(ehci);
+                end_unlink_async(ehci);
 
         /* Otherwise start a new IAA cycle */
         } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
-                struct ehci_qh *qh;
-
-                /* Do only the first waiting QH (nVidia bug?) */
-                qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
-                                unlink_node);
-
-                /*
-                 * Intel (?) bug: The HC can write back the overlay region
-                 * even after the IAA interrupt occurs. In self-defense,
-                 * always go through two IAA cycles for each QH.
-                 */
-                if (qh->qh_state == QH_STATE_UNLINK_WAIT)
-                        qh->qh_state = QH_STATE_UNLINK;
-                else
-                        list_move_tail(&qh->unlink_node, &ehci->async_iaa);
 
                 /* Make sure the unlinks are all visible to the hardware */
                 wmb();
@@ -1215,16 +1193,59 @@ static void end_unlink_async(struct ehci_hcd *ehci)
 static void end_unlink_async(struct ehci_hcd *ehci)
 {
         struct ehci_qh *qh;
+        bool early_exit;
 
         if (ehci->has_synopsys_hc_bug)
                 ehci_writel(ehci, (u32) ehci->async->qh_dma,
                                 &ehci->regs->async_next);
 
+        /* The current IAA cycle has ended */
+        ehci->iaa_in_progress = false;
+
+        if (list_empty(&ehci->async_unlink))
+                return;
+        qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+                        unlink_node);   /* QH whose IAA cycle just ended */
+
+        /*
+         * If async_unlinking is set then this routine is already running,
+         * either on the stack or on another CPU.
+         */
+        early_exit = ehci->async_unlinking;
+
+        /* If the controller isn't running, process all the waiting QHs */
+        if (ehci->rh_state < EHCI_RH_RUNNING)
+                list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
+
+        /*
+         * Intel (?) bug: The HC can write back the overlay region even
+         * after the IAA interrupt occurs. In self-defense, always go
+         * through two IAA cycles for each QH.
+         */
+        else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+                qh->qh_state = QH_STATE_UNLINK;
+                early_exit = true;
+        }
+
+        /* Otherwise process only the first waiting QH (NVIDIA bug?) */
+        else
+                list_move_tail(&qh->unlink_node, &ehci->async_idle);
+
+        /* Start a new IAA cycle if any QHs are waiting for it */
+        if (!list_empty(&ehci->async_unlink))
+                start_iaa_cycle(ehci);
+
+        /*
+         * Don't allow nesting or concurrent calls,
+         * or wait for the second IAA cycle for the next QH.
+         */
+        if (early_exit)
+                return;
+
         /* Process the idle QHs */
- restart:
         ehci->async_unlinking = true;
-        while (!list_empty(&ehci->async_iaa)) {
-                qh = list_first_entry(&ehci->async_iaa, struct ehci_qh,
+        while (!list_empty(&ehci->async_idle)) {
+                qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
                                 unlink_node);
                 list_del(&qh->unlink_node);
 
@@ -1239,13 +1260,6 @@ static void end_unlink_async(struct ehci_hcd *ehci)
                         disable_async(ehci);
         }
         ehci->async_unlinking = false;
-
-        /* Start a new IAA cycle if any QHs are waiting for it */
-        if (!list_empty(&ehci->async_unlink)) {
-                start_iaa_cycle(ehci, true);
-                if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
-                        goto restart;
-        }
 }
 
 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
@@ -1270,8 +1284,7 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
         }
 
         /* If nothing else is being unlinked, unlink the last empty QH */
-        if (list_empty(&ehci->async_iaa) && list_empty(&ehci->async_unlink) &&
-                        qh_to_unlink) {
+        if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
                 start_unlink_async(ehci, qh_to_unlink);
                 --count;
         }
@@ -1293,7 +1306,7 @@ static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
                 WARN_ON(!list_empty(&qh->qtd_list));
                 single_unlink_async(ehci, qh);
         }
-        start_iaa_cycle(ehci, false);
+        start_iaa_cycle(ehci);
 }
 
 /* makes sure the async qh will become idle */
@@ -1306,7 +1319,7 @@ static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
                 return;
 
         single_unlink_async(ehci, qh);
-        start_iaa_cycle(ehci, false);
+        start_iaa_cycle(ehci);
 }
 
 /*-------------------------------------------------------------------------*/
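Taken together, the ehci-q.c changes above replace the old async_iaa staging list with a flag plus an async_idle list: start_iaa_cycle() now only sets iaa_in_progress and rings the doorbell, while end_unlink_async() owns the bookkeeping, including the deliberate two-IAA-cycles-per-QH workaround and restarting the cycle for any QHs still waiting. The toy userspace model below walks two hypothetical QHs through that state machine; it is not kernel code, it assumes a running controller and a single thread, and it omits the early_exit reentrancy guard and locking.

#include <stdbool.h>
#include <stdio.h>

enum qh_state { QH_LINKED, QH_UNLINK_WAIT, QH_UNLINK, QH_IDLE };

struct qh {
        int id;
        enum qh_state state;
};

#define MAX_QH 8

static struct qh *async_unlink[MAX_QH];  /* QHs waiting for IAA cycles */
static int n_unlink;
static struct qh *async_idle[MAX_QH];    /* QHs whose unlink has finished */
static int n_idle;
static bool iaa_in_progress;

static void start_iaa_cycle(void)
{
        if (iaa_in_progress)            /* at most one outstanding cycle */
                return;
        iaa_in_progress = true;
        printf("  ring IAA doorbell\n");        /* stands in for the CMD_IAAD write */
}

static void end_unlink_async(void)      /* runs on the simulated IAA interrupt */
{
        struct qh *qh;
        int i;

        iaa_in_progress = false;
        if (n_unlink == 0)
                return;
        qh = async_unlink[0];           /* QH whose IAA cycle just ended */

        if (qh->state == QH_UNLINK_WAIT) {
                /* first cycle done; insist on a second one for this QH */
                qh->state = QH_UNLINK;
        } else {
                /* second cycle done; move only this QH to the idle list */
                for (i = 1; i < n_unlink; i++)
                        async_unlink[i - 1] = async_unlink[i];
                n_unlink--;
                async_idle[n_idle++] = qh;
        }

        if (n_unlink > 0)               /* more QHs are waiting */
                start_iaa_cycle();

        while (n_idle > 0) {            /* process the idle QHs */
                qh = async_idle[--n_idle];
                qh->state = QH_IDLE;
                printf("  qh %d is idle\n", qh->id);
        }
}

static void start_unlink_async(struct qh *qh)
{
        qh->state = QH_UNLINK_WAIT;
        async_unlink[n_unlink++] = qh;
        start_iaa_cycle();
}

int main(void)
{
        struct qh a = { .id = 1 }, b = { .id = 2 };
        int cycle;

        start_unlink_async(&a);
        start_unlink_async(&b); /* no second doorbell: a cycle is in progress */

        /* Simulate IAA interrupts until nothing is pending: each QH needs two. */
        for (cycle = 1; iaa_in_progress; cycle++) {
                printf("IAA interrupt %d\n", cycle);
                end_unlink_async();
        }
        return 0;
}

Running this prints four IAA interrupts for the two QHs, illustrating why a single flag is enough to serialize the cycles that the old list used to track.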
2 changes: 1 addition & 1 deletion trunk/drivers/usb/host/ehci-timer.c
@@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
          * (a) SMP races against real IAA firing and retriggering, and
          * (b) clean HC shutdown, when IAA watchdog was pending.
          */
-        if (ehci->rh_state != EHCI_RH_RUNNING)
+        if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
                 return;
 
         /* If we get here, IAA is *REALLY* late. It's barely
3 changes: 2 additions & 1 deletion trunk/drivers/usb/host/ehci.h
@@ -121,6 +121,7 @@ struct ehci_hcd { /* one per controller */
         bool scanning:1;
         bool need_rescan:1;
         bool intr_unlinking:1;
+        bool iaa_in_progress:1;
         bool async_unlinking:1;
         bool shutdown:1;
         struct ehci_qh *qh_scan_next;
@@ -129,7 +130,7 @@ struct ehci_hcd { /* one per controller */
         struct ehci_qh *async;
         struct ehci_qh *dummy; /* For AMD quirk use */
         struct list_head async_unlink;
-        struct list_head async_iaa;
+        struct list_head async_idle;
         unsigned async_unlink_cycle;
         unsigned async_count; /* async activity count */
 
