
Commit b653f19
---
yaml
---
r: 265027
b: refs/heads/master
c: 7e393a8
h: refs/heads/master
i:
  265025: b722674
  265023: 1677786
v: v3
Andiry Xu authored and Greg Kroah-Hartman committed Sep 26, 2011
1 parent 707a25c commit b653f19
Showing 5 changed files with 52 additions and 39 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c1045e87b2cd293d53dff19779ea46b19195d593
+refs/heads/master: 7e393a834b41001174a8fb3ae3bc23a749467760
32 changes: 18 additions & 14 deletions trunk/drivers/usb/host/xhci-mem.c
@@ -79,7 +79,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, bool link_trbs)
+		struct xhci_segment *next, bool link_trbs, bool isoc)
 {
 	u32 val;
 
@@ -95,7 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
-		if (xhci_link_trb_quirk(xhci))
+		/* Set chain bit for isoc rings on AMD 0.96 host */
+		if (xhci_link_trb_quirk(xhci) ||
+				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			val |= TRB_CHAIN;
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
@@ -152,7 +154,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, bool link_trbs, gfp_t flags)
+		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
 {
 	struct xhci_ring *ring;
 	struct xhci_segment *prev;
@@ -178,12 +180,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		next = xhci_segment_alloc(xhci, flags);
 		if (!next)
 			goto fail;
-		xhci_link_segments(xhci, prev, next, link_trbs);
+		xhci_link_segments(xhci, prev, next, link_trbs, isoc);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +231,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-		struct xhci_ring *ring)
+		struct xhci_ring *ring, bool isoc)
 {
 	struct xhci_segment *seg = ring->first_seg;
 	do {
 		memset(seg->trbs, 0,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
 		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, 1);
+		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
 		seg = seg->next;
 	} while (seg != ring->first_seg);
 	xhci_initialize_ring_info(ring);
@@ -540,7 +542,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 */
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -874,7 +876,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1315,10 +1317,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, true, mem_flags);
+			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring) {
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
@@ -1327,7 +1329,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
 		virt_dev->num_rings_cached--;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2236,7 +2239,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2267,7 +2270,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * the event ring segment table (ERST). Section 4.9.3.
 	 */
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+						flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
3 changes: 3 additions & 0 deletions trunk/drivers/usb/host/xhci-pci.c
@@ -128,6 +128,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
 		xhci->quirks |= XHCI_NEC_HOST;
 
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+		xhci->quirks |= XHCI_AMD_0x96_HOST;
+
 	/* AMD PLL quirk */
 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
 		xhci->quirks |= XHCI_AMD_PLL_FIX;
53 changes: 29 additions & 24 deletions trunk/drivers/usb/host/xhci-ring.c
@@ -185,7 +185,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming)
+		bool consumer, bool more_trbs_coming, bool isoc)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -212,11 +212,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 				if (!chain && !more_trbs_coming)
 					break;
 
-				/* If we're not dealing with 0.95 hardware,
+				/* If we're not dealing with 0.95 hardware or
+				 * isoc rings on AMD 0.96 host,
 				 * carry over the chain bit of the previous TRB
 				 * (which may mean the chain bit is cleared).
 				 */
-				if (!xhci_link_trb_quirk(xhci)) {
+				if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+						&& !xhci_link_trb_quirk(xhci)) {
 					next->link.control &=
 						cpu_to_le32(~TRB_CHAIN);
 					next->link.control |=
@@ -2391,7 +2393,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  * prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming,
+		bool consumer, bool more_trbs_coming, bool isoc,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -2401,15 +2403,15 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = cpu_to_le32(field2);
 	trb->field[2] = cpu_to_le32(field3);
 	trb->field[3] = cpu_to_le32(field4);
-	inc_enq(xhci, ring, consumer, more_trbs_coming);
+	inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
 }
 
 /*
  * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
 	switch (ep_state) {
@@ -2451,10 +2453,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		next = ring->enqueue;
 
 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
-			/* If we're not dealing with 0.95 hardware,
-			 * clear the chain bit.
+			/* If we're not dealing with 0.95 hardware or isoc rings
+			 * on AMD 0.96 host, clear the chain bit.
 			 */
-			if (!xhci_link_trb_quirk(xhci))
+			if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+					(xhci->quirks & XHCI_AMD_0x96_HOST)))
 				next->link.control &= cpu_to_le32(~TRB_CHAIN);
 			else
 				next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2487,6 +2490,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int num_trbs,
 		struct urb *urb,
 		unsigned int td_index,
+		bool isoc,
 		gfp_t mem_flags)
 {
 	int ret;
@@ -2504,7 +2508,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	ret = prepare_ring(xhci, ep_ring,
 			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			num_trbs, mem_flags);
+			num_trbs, isoc, mem_flags);
 	if (ret)
 		return ret;
 
@@ -2727,7 +2731,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
 
@@ -2822,7 +2826,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2913,7 +2917,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -2985,7 +2989,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3045,7 +3049,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, mem_flags);
+			num_trbs, urb, 0, false, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -3078,7 +3082,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 	}
 
-	queue_trb(xhci, ep_ring, false, true,
+	queue_trb(xhci, ep_ring, false, true, false,
 			setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
 			le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
 			TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3098,7 +3102,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false, true,
+		queue_trb(xhci, ep_ring, false, true, false,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -3114,7 +3118,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false, false,
+	queue_trb(xhci, ep_ring, false, false, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
@@ -3263,7 +3267,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+				urb->stream_id, trbs_per_td, urb, i, true,
+				mem_flags);
 		if (ret < 0) {
 			if (i == 0)
 				return ret;
@@ -3333,7 +3338,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 					remainder |
 					TRB_INTR_TARGET(0);
 
-			queue_trb(xhci, ep_ring, false, more_trbs_coming,
+			queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3415,7 +3420,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			num_trbs, mem_flags);
+			num_trbs, true, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3474,16 +3479,16 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 		reserved_trbs++;
 
 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-			reserved_trbs, GFP_ATOMIC);
+			reserved_trbs, false, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
 		if (command_must_succeed)
 			xhci_err(xhci, "ERR: Reserved TRB counting for "
 					"unfailable commands failed.\n");
 		return ret;
 	}
-	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
-			field4 | xhci->cmd_ring->cycle_state);
+	queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
+			field3, field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }
 
1 change: 1 addition & 0 deletions trunk/drivers/usb/host/xhci.h
@@ -1462,6 +1462,7 @@ struct xhci_hcd {
 #define XHCI_BROKEN_MSI		(1 << 6)
 #define XHCI_RESET_ON_RESUME	(1 << 7)
 #define XHCI_SW_BW_CHECKING	(1 << 8)
+#define XHCI_AMD_0x96_HOST	(1 << 9)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
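The net effect of the new isoc flag on link-TRB chain bits can be read straight off the hunks above. Below is a minimal sketch of that decision, distilled from this diff rather than taken from it: xhci_link_trb_quirk(), the quirks field, and XHCI_AMD_0x96_HOST all appear in the patch, while the helper name xhci_link_chain_quirk() is hypothetical and not part of the commit.

/*
 * Illustrative sketch only, not code from this commit: the chain-bit rule
 * the patch applies in xhci_link_segments(), inc_enq() and prepare_ring().
 * The helper name is made up for this note.
 */
static bool xhci_link_chain_quirk(struct xhci_hcd *xhci, bool isoc)
{
	/* 0.95 hosts always need the chain bit set in link TRBs. */
	if (xhci_link_trb_quirk(xhci))
		return true;
	/* AMD 0.96 hosts need it too, but only on isochronous rings. */
	return isoc && (xhci->quirks & XHCI_AMD_0x96_HOST);
}

Per the call sites in the diff, only the isochronous paths pass isoc = true (xhci_queue_isoc_tx(), xhci_queue_isoc_tx_prepare(), and the isoc ring allocation in xhci_endpoint_init()); command, control, bulk, and stream rings keep the existing 0.95-only behavior.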
