diff --git a/[refs] b/[refs]
index 238bbf4876b3..4fe7e5a4a183 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7de8b9261d6abbb2bd71aab6a5ac0981696dcd2c
+refs/heads/master: f3a756883ac028c536479e2eb283477fec80c828
diff --git a/trunk/drivers/char/n_tty.c b/trunk/drivers/char/n_tty.c
index 973be2f44195..ff47907ff1bf 100644
--- a/trunk/drivers/char/n_tty.c
+++ b/trunk/drivers/char/n_tty.c
@@ -1583,7 +1583,6 @@ static int n_tty_open(struct tty_struct *tty)
 
 static inline int input_available_p(struct tty_struct *tty, int amt)
 {
-	tty_flush_to_ldisc(tty);
 	if (tty->icanon) {
 		if (tty->canon_data)
 			return 1;
diff --git a/trunk/drivers/char/pty.c b/trunk/drivers/char/pty.c
index 6e6942c45f5b..3850a68f265a 100644
--- a/trunk/drivers/char/pty.c
+++ b/trunk/drivers/char/pty.c
@@ -52,6 +52,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
 		return;
 	tty->link->packet = 0;
 	set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+	tty_flip_buffer_push(tty->link);
 	wake_up_interruptible(&tty->link->read_wait);
 	wake_up_interruptible(&tty->link->write_wait);
 	if (tty->driver->subtype == PTY_TYPE_MASTER) {
@@ -207,6 +208,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
 	clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
 	set_bit(TTY_THROTTLED, &tty->flags);
 	retval = 0;
+	tty->low_latency = 1;
 out:
 	return retval;
 }
diff --git a/trunk/drivers/char/tty_buffer.c b/trunk/drivers/char/tty_buffer.c
index 3108991c5c8b..810ee25d66a4 100644
--- a/trunk/drivers/char/tty_buffer.c
+++ b/trunk/drivers/char/tty_buffer.c
@@ -461,19 +461,6 @@ static void flush_to_ldisc(struct work_struct *work)
 	tty_ldisc_deref(disc);
 }
 
-/**
- *	tty_flush_to_ldisc
- *	@tty: tty to push
- *
- *	Push the terminal flip buffers to the line discipline.
- *
- *	Must not be called from IRQ context.
- */
-void tty_flush_to_ldisc(struct tty_struct *tty)
-{
-	flush_to_ldisc(&tty->buf.work.work);
-}
-
 /**
  *	tty_flip_buffer_push	-	terminal
  *	@tty: tty to push
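Note on the tty hunks above: pty_close() now pushes the flip buffer itself and pty_open() marks the pty low-latency, so queued input still reaches the line discipline without the n_tty-side tty_flush_to_ldisc() call that the first hunk removes. A minimal sketch of the 2.6.31-era receive path these changes rely on (demo_rx() is a hypothetical helper, not code from this patch):

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Queue received bytes into the flip buffer, then hand them to the line
 * discipline.  With tty->low_latency = 1 the push is processed
 * synchronously instead of being deferred to the buffer workqueue.
 */
static void demo_rx(struct tty_struct *tty, const unsigned char *buf, int count)
{
	tty_insert_flip_string(tty, buf, count);
	tty_flip_buffer_push(tty);
}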
diff --git a/trunk/drivers/hwmon/asus_atk0110.c b/trunk/drivers/hwmon/asus_atk0110.c
index fe4fa29c9219..bff0103610c1 100644
--- a/trunk/drivers/hwmon/asus_atk0110.c
+++ b/trunk/drivers/hwmon/asus_atk0110.c
@@ -593,11 +593,7 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj)
 	sensor->data = data;
 	sensor->id = flags->integer.value;
 	sensor->limit1 = limit1->integer.value;
-	if (data->old_interface)
-		sensor->limit2 = limit2->integer.value;
-	else
-		/* The upper limit is expressed as delta from lower limit */
-		sensor->limit2 = sensor->limit1 + limit2->integer.value;
+	sensor->limit2 = limit2->integer.value;
 
 	snprintf(sensor->input_attr_name, ATTR_NAME_SIZE,
 			"%s%d_input", base_name, start + *num);
diff --git a/trunk/drivers/hwmon/smsc47m1.c b/trunk/drivers/hwmon/smsc47m1.c
index ba75bfcf14ce..a92dbb97ee99 100644
--- a/trunk/drivers/hwmon/smsc47m1.c
+++ b/trunk/drivers/hwmon/smsc47m1.c
@@ -86,7 +86,6 @@ superio_exit(void)
 #define SUPERIO_REG_ACT		0x30
 #define SUPERIO_REG_BASE	0x60
 #define SUPERIO_REG_DEVID	0x20
-#define SUPERIO_REG_DEVREV	0x21
 
 /* Logical device registers */
 
@@ -430,9 +429,6 @@ static int __init smsc47m1_find(unsigned short *addr,
 	 * The LPC47M292 (device id 0x6B) is somewhat compatible, but it
 	 * supports a 3rd fan, and the pin configuration registers are
 	 * unfortunately different.
-	 * The LPC47M233 has the same device id (0x6B) but is not compatible.
-	 * We check the high bit of the device revision register to
-	 * differentiate them.
 	 */
 	switch (val) {
 	case 0x51:
@@ -452,13 +448,6 @@ static int __init smsc47m1_find(unsigned short *addr,
 		sio_data->type = smsc47m1;
 		break;
 	case 0x6B:
-		if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) {
-			pr_debug(DRVNAME ": "
-				 "Found SMSC LPC47M233, unsupported\n");
-			superio_exit();
-			return -ENODEV;
-		}
-
 		pr_info(DRVNAME ": Found SMSC LPC47M292\n");
 		sio_data->type = smsc47m2;
 		break;
diff --git a/trunk/drivers/i2c/chips/tsl2550.c b/trunk/drivers/i2c/chips/tsl2550.c
index b96f3025e588..1a9cc135219f 100644
--- a/trunk/drivers/i2c/chips/tsl2550.c
+++ b/trunk/drivers/i2c/chips/tsl2550.c
@@ -27,7 +27,7 @@
 #include
 
 #define TSL2550_DRV_NAME	"tsl2550"
-#define DRIVER_VERSION		"1.1.2"
+#define DRIVER_VERSION		"1.1.1"
 
 /*
  * Defines
@@ -189,16 +189,13 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
 	u8 r = 128;
 
 	/* Avoid division by 0 and count 1 cannot be greater than count 0 */
-	if (c1 <= c0)
-		if (c0) {
-			r = c1 * 128 / c0;
-
-			/* Calculate LUX */
-			lux = ((c0 - c1) * ratio_lut[r]) / 256;
-		} else
-			lux = 0;
+	if (c0 && (c1 <= c0))
+		r = c1 * 128 / c0;
 	else
-		return -EAGAIN;
+		return -1;
+
+	/* Calculate LUX */
+	lux = ((c0 - c1) * ratio_lut[r]) / 256;
 
 	/* LUX range check */
 	return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
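For clarity, the two sides of the tsl2550 hunk handle the degenerate inputs differently: the removed code distinguished "no light" (c0 == 0, lux = 0) from an invalid ratio (c1 > c0, -EAGAIN), while the restored code folds both into a single -1 return. A compact restatement of the removed branch structure (a sketch only; ratio_lut and the lux formula are as in the hunk above):

#include <linux/types.h>
#include <linux/errno.h>

/* Flattened version of the removed logic, for readability. */
static int lux_removed_version(unsigned int c0, unsigned int c1,
			       const u8 *ratio_lut)
{
	if (c1 > c0)
		return -EAGAIN;	/* count 1 may not exceed count 0: retry */
	if (c0 == 0)
		return 0;	/* both channels dark: 0 lux, not an error */
	return ((c0 - c1) * ratio_lut[c1 * 128 / c0]) / 256;
}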
diff --git a/trunk/drivers/isdn/mISDN/l1oip_core.c b/trunk/drivers/isdn/mISDN/l1oip_core.c
index c3b661a666cb..990e6a7e6674 100644
--- a/trunk/drivers/isdn/mISDN/l1oip_core.c
+++ b/trunk/drivers/isdn/mISDN/l1oip_core.c
@@ -731,10 +731,10 @@ l1oip_socket_thread(void *data)
 	while (!signal_pending(current)) {
 		struct kvec iov = {
 			.iov_base = recvbuf,
-			.iov_len = recvbuf_size,
+			.iov_len = sizeof(recvbuf),
 		};
 		recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
-					 recvbuf_size, 0);
+					 sizeof(recvbuf), 0);
 		if (recvlen > 0) {
 			l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
 		} else {
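One thing worth noting about the l1oip hunk: the two sides are only equivalent if recvbuf is a true array. If recvbuf is a kmalloc'ed pointer, sizeof(recvbuf) evaluates to the pointer size (4 or 8 bytes), not the buffer length, silently capping every receive. A standalone illustration of the pitfall (userspace C, hypothetical buffer names):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned char array_buf[1500];		/* sizeof == 1500 */
	unsigned char *heap_buf = malloc(1500);	/* sizeof == 4 or 8 */

	printf("array: %zu, pointer: %zu\n",
	       sizeof(array_buf), sizeof(heap_buf));
	free(heap_buf);
	return 0;
}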
diff --git a/trunk/drivers/staging/serqt_usb2/serqt_usb2.c b/trunk/drivers/staging/serqt_usb2/serqt_usb2.c
index a9bd4106beb7..f9ff9c266780 100644
--- a/trunk/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/trunk/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -360,18 +360,18 @@ static void qt_read_bulk_callback(struct urb *urb)
 	if (port_paranoia_check(port, __func__) != 0) {
 		dbg("%s - port_paranoia_check, exiting\n", __func__);
 		qt_port->ReadBulkStopped = 1;
-		return;
+		goto exit;
 	}
 
 	if (!serial) {
 		dbg("%s - bad serial pointer, exiting\n", __func__);
-		return;
+		goto exit;
 	}
 	if (qt_port->closePending == 1) {
 		/* Were closing , stop reading */
 		dbg("%s - (qt_port->closepending == 1\n", __func__);
 		qt_port->ReadBulkStopped = 1;
-		return;
+		goto exit;
 	}
 
 	/*
@@ -381,7 +381,7 @@ static void qt_read_bulk_callback(struct urb *urb)
 	 */
 	if (qt_port->RxHolding == 1) {
 		qt_port->ReadBulkStopped = 1;
-		return;
+		goto exit;
 	}
 
 	if (urb->status) {
@@ -389,7 +389,7 @@ static void qt_read_bulk_callback(struct urb *urb)
 
 		dbg("%s - nonzero read bulk status received: %d\n",
 		    __func__, urb->status);
-		return;
+		goto exit;
 	}
 
 	if (tty && RxCount) {
@@ -463,6 +463,8 @@ static void qt_read_bulk_callback(struct urb *urb)
 	}
 
 	schedule_work(&port->work);
+exit:
+	tty_kref_put(tty);
 }
 
 /*
@@ -1041,7 +1043,7 @@ static void qt_block_until_empty(struct tty_struct *tty,
 	}
 }
 
-static void qt_close( struct usb_serial_port *port)
+static void qt_close(struct usb_serial_port *port)
 {
 	struct usb_serial *serial = port->serial;
 	struct quatech_port *qt_port;
@@ -1068,6 +1070,7 @@ static void qt_close(struct usb_serial_port *port)
 	/* wait up to for transmitter to empty */
 	if (serial->dev)
 		qt_block_until_empty(tty, qt_port);
+	tty_kref_put(tty);
 
 	/* Close uart channel */
 	status = qt_close_channel(serial, index);
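The serqt hunks above converge on one rule: every path that obtained a tty reference must drop it exactly once, which is why the early returns become "goto exit" with a single tty_kref_put() at the bottom. A minimal sketch of the pattern (demo_callback() is hypothetical and assumes the reference came from tty_port_tty_get()):

#include <linux/tty.h>

static void demo_callback(struct tty_port *port)
{
	struct tty_struct *tty = tty_port_tty_get(port);	/* takes a ref */

	if (!tty)
		return;
	if (!tty->driver_data)
		goto exit;	/* early exits must still drop the ref */
	/* ... deliver data ... */
exit:
	tty_kref_put(tty);	/* balanced on every path */
}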
diff --git a/trunk/drivers/usb/core/config.c b/trunk/drivers/usb/core/config.c
index a16c538d0132..24dfb33f90cb 100644
--- a/trunk/drivers/usb/core/config.c
+++ b/trunk/drivers/usb/core/config.c
@@ -80,18 +80,38 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	int max_tx;
 	int i;
 
+	/* Allocate space for the SS endpoint companion descriptor */
+	ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+			GFP_KERNEL);
+	if (!ep->ss_ep_comp)
+		return -ENOMEM;
 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
 		dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
				" interface %d altsetting %d ep %d: "
				"using minimum values\n",
				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+		ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+		ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+		ep->ss_ep_comp->desc.bMaxBurst = 0;
+		/*
+		 * Leave bmAttributes as zero, which will mean no streams for
+		 * bulk, and isoc won't support multiple bursts of packets.
+		 * With bursts of only one packet, and a Mult of 1, the max
+		 * amount of data moved per endpoint service interval is one
+		 * packet.
+		 */
+		if (usb_endpoint_xfer_isoc(&ep->desc) ||
+				usb_endpoint_xfer_int(&ep->desc))
+			ep->ss_ep_comp->desc.wBytesPerInterval =
+				ep->desc.wMaxPacketSize;
 		/*
 		 * The next descriptor is for an Endpoint or Interface,
 		 * no extra descriptors to copy into the companion structure,
 		 * and we didn't eat up any of the buffer.
 		 */
-		return 0;
+		retval = 0;
+		goto valid;
 	}
 	memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
 	desc = &ep->ss_ep_comp->desc;
@@ -300,28 +320,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 		buffer += i;
 		size -= i;
 
-		/* Allocate space for the SS endpoint companion descriptor */
-		endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
-				GFP_KERNEL);
-		if (!endpoint->ss_ep_comp)
-			return -ENOMEM;
-
-		/* Fill in some default values (may be overwritten later) */
-		endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
-		endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
-		endpoint->ss_ep_comp->desc.bMaxBurst = 0;
-		/*
-		 * Leave bmAttributes as zero, which will mean no streams for
-		 * bulk, and isoc won't support multiple bursts of packets.
-		 * With bursts of only one packet, and a Mult of 1, the max
-		 * amount of data moved per endpoint service interval is one
-		 * packet.
-		 */
-		if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
-				usb_endpoint_xfer_int(&endpoint->desc))
-			endpoint->ss_ep_comp->desc.wBytesPerInterval =
-				endpoint->desc.wMaxPacketSize;
-
 		if (size > 0) {
 			retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
 					inum, asnum, endpoint, num_ep, buffer,
@@ -331,10 +329,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 				retval = buffer - buffer0;
 		}
 	} else {
-		dev_warn(ddev, "config %d interface %d altsetting %d "
-				"endpoint 0x%X has no "
-				"SuperSpeed companion descriptor\n",
-				cfgno, inum, asnum, d->bEndpointAddress);
 		retval = buffer - buffer0;
 	}
 } else {
diff --git a/trunk/drivers/usb/host/ehci-orion.c b/trunk/drivers/usb/host/ehci-orion.c
index 1d283e1b2b8d..dc2ac613a9d1 100644
--- a/trunk/drivers/usb/host/ehci-orion.c
+++ b/trunk/drivers/usb/host/ehci-orion.c
@@ -105,7 +105,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	int retval;
 
-	ehci_reset(ehci);
 	retval = ehci_halt(ehci);
 	if (retval)
 		return retval;
@@ -119,6 +118,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 
 	hcd->has_tt = 1;
 
+	ehci_reset(ehci);
 	ehci_port_power(ehci, 0);
 
 	return retval;
diff --git a/trunk/drivers/usb/host/ohci-omap.c b/trunk/drivers/usb/host/ohci-omap.c
index 83cbecd2a1ed..f3aaba35e912 100644
--- a/trunk/drivers/usb/host/ohci-omap.c
+++ b/trunk/drivers/usb/host/ohci-omap.c
@@ -282,7 +282,6 @@ static int ohci_omap_init(struct usb_hcd *hcd)
 static void ohci_omap_stop(struct usb_hcd *hcd)
 {
 	dev_dbg(hcd->self.controller, "stopping USB Controller\n");
-	ohci_stop(hcd);
 	omap_ohci_clock_power(0);
 }
diff --git a/trunk/drivers/usb/host/xhci-dbg.c b/trunk/drivers/usb/host/xhci-dbg.c
index 705e34324156..2501c571f855 100644
--- a/trunk/drivers/usb/host/xhci-dbg.c
+++ b/trunk/drivers/usb/host/xhci-dbg.c
@@ -173,7 +173,6 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 {
 	void *addr;
 	u32 temp;
-	u64 temp_64;
 
 	addr = &ir_set->irq_pending;
 	temp = xhci_readl(xhci, addr);
@@ -201,15 +200,25 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
 				addr, (unsigned int)temp);
 
-	addr = &ir_set->erst_base;
-	temp_64 = xhci_read_64(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
-			addr, temp_64);
+	addr = &ir_set->erst_base[0];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_base[0] = 0x%x\n",
+			addr, (unsigned int) temp);
+
+	addr = &ir_set->erst_base[1];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_base[1] = 0x%x\n",
+			addr, (unsigned int) temp);
 
-	addr = &ir_set->erst_dequeue;
-	temp_64 = xhci_read_64(xhci, addr);
-	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
-			addr, temp_64);
+	addr = &ir_set->erst_dequeue[0];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[0] = 0x%x\n",
+			addr, (unsigned int) temp);
+
+	addr = &ir_set->erst_dequeue[1];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[1] = 0x%x\n",
+			addr, (unsigned int) temp);
 }
 
 void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -259,7 +268,8 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "Link TRB:\n");
 		xhci_print_trb_offsets(xhci, trb);
 
-		address = trb->link.segment_ptr;
+		address = trb->link.segment_ptr[0] +
+			(((u64) trb->link.segment_ptr[1]) << 32);
 		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 
 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -272,7 +282,8 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer;
+		address = trb->trans_event.buffer[0] +
+			(((u64) trb->trans_event.buffer[1]) << 32);
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -280,7 +291,8 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb;
+		address = trb->event_cmd.cmd_trb[0] +
+			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -316,8 +328,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				lower_32_bits(trb->link.segment_ptr),
-				upper_32_bits(trb->link.segment_ptr),
+				(unsigned int) trb->link.segment_ptr[0],
+				(unsigned int) trb->link.segment_ptr[1],
 				(unsigned int) trb->link.intr_target,
 				(unsigned int) trb->link.control);
 		addr += sizeof(*trb);
@@ -374,8 +386,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 		entry = &erst->entries[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
				(unsigned int) addr,
-				lower_32_bits(entry->seg_addr),
-				upper_32_bits(entry->seg_addr),
+				(unsigned int) entry->seg_addr[0],
+				(unsigned int) entry->seg_addr[1],
 				(unsigned int) entry->seg_size,
 				(unsigned int) entry->rsvd);
 		addr += sizeof(*entry);
@@ -384,147 +396,90 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 {
-	u64 val;
+	u32 val;
 
-	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
-			lower_32_bits(val));
-	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
-			upper_32_bits(val));
-}
-
-/* Print the last 32 bytes for 64-byte contexts */
-static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
-{
-	int i;
-	for (i = 0; i < 4; ++i) {
-		xhci_dbg(xhci, "@%p (virt) @%08llx "
-			 "(dma) %#08llx - rsvd64[%d]\n",
-			 &ctx[4 + i], (unsigned long long)dma,
-			 ctx[4 + i], i);
-		dma += 8;
-	}
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
 {
+	int i, j;
+	int last_ep_ctx = 31;
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
-	int i;
 
-	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
-	dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
-	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+			&ctx->drop_flags, (unsigned long long)dma,
+			ctx->drop_flags);
+	dma += field_size;
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+			&ctx->add_flags, (unsigned long long)dma,
+			ctx->add_flags);
+	dma += field_size;
+	for (i = 0; i > 6; ++i) {
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+				&ctx->rsvd[i], (unsigned long long)dma,
+				ctx->rsvd[i], i);
+		dma += field_size;
+	}
 
 	xhci_dbg(xhci, "Slot Context:\n");
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
-			&slot_ctx->dev_info,
-			(unsigned long long)dma, slot_ctx->dev_info);
+			&ctx->slot.dev_info,
+			(unsigned long long)dma, ctx->slot.dev_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
-			&slot_ctx->dev_info2,
-			(unsigned long long)dma, slot_ctx->dev_info2);
+			&ctx->slot.dev_info2,
+			(unsigned long long)dma, ctx->slot.dev_info2);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
-			&slot_ctx->tt_info,
-			(unsigned long long)dma, slot_ctx->tt_info);
+			&ctx->slot.tt_info,
+			(unsigned long long)dma, ctx->slot.tt_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
-			&slot_ctx->dev_state,
-			(unsigned long long)dma, slot_ctx->dev_state);
+			&ctx->slot.dev_state,
+			(unsigned long long)dma, ctx->slot.dev_state);
 	dma += field_size;
-	for (i = 0; i < 4; ++i) {
+	for (i = 0; i > 4; ++i) {
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-				&slot_ctx->reserved[i], (unsigned long long)dma,
-				slot_ctx->reserved[i], i);
+				&ctx->slot.reserved[i], (unsigned long long)dma,
+				ctx->slot.reserved[i], i);
 		dma += field_size;
 	}
 
-	if (csz)
-		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
-}
-
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
-		     struct xhci_container_ctx *ctx,
-		     unsigned int last_ep)
-{
-	int i, j;
-	int last_ep_ctx = 31;
-	/* Fields are 32 bits wide, DMA addresses are in bytes */
-	int field_size = 32 / 8;
-	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
-
 	if (last_ep < 31)
 		last_ep_ctx = last_ep + 1;
 	for (i = 0; i < last_ep_ctx; ++i) {
-		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
-		dma_addr_t dma = ctx->dma +
-			((unsigned long)ep_ctx - (unsigned long)ctx);
-
 		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
-				&ep_ctx->ep_info,
-				(unsigned long long)dma, ep_ctx->ep_info);
+				&ctx->ep[i].ep_info,
+				(unsigned long long)dma, ctx->ep[i].ep_info);
 		dma += field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
-				&ep_ctx->ep_info2,
-				(unsigned long long)dma, ep_ctx->ep_info2);
+				&ctx->ep[i].ep_info2,
+				(unsigned long long)dma, ctx->ep[i].ep_info2);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
+				&ctx->ep[i].deq[0],
+				(unsigned long long)dma, ctx->ep[i].deq[0]);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
+				&ctx->ep[i].deq[1],
+				(unsigned long long)dma, ctx->ep[i].deq[1]);
 		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
-				&ep_ctx->deq,
-				(unsigned long long)dma, ep_ctx->deq);
-		dma += 2*field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
-				&ep_ctx->tx_info,
-				(unsigned long long)dma, ep_ctx->tx_info);
+				&ctx->ep[i].tx_info,
+				(unsigned long long)dma, ctx->ep[i].tx_info);
 		dma += field_size;
 		for (j = 0; j < 3; ++j) {
 			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-					&ep_ctx->reserved[j],
+					&ctx->ep[i].reserved[j],
 					(unsigned long long)dma,
-					ep_ctx->reserved[j], j);
-			dma += field_size;
-		}
-
-		if (csz)
-			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
-	}
-}
-
-void xhci_dbg_ctx(struct xhci_hcd *xhci,
-		  struct xhci_container_ctx *ctx,
-		  unsigned int last_ep)
-{
-	int i;
-	/* Fields are 32 bits wide, DMA addresses are in bytes */
-	int field_size = 32 / 8;
-	struct xhci_slot_ctx *slot_ctx;
-	dma_addr_t dma = ctx->dma;
-	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
-
-	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
-		struct xhci_input_control_ctx *ctrl_ctx =
-			xhci_get_input_control_ctx(xhci, ctx);
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
-			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
-			 ctrl_ctx->drop_flags);
-		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
-			 &ctrl_ctx->add_flags, (unsigned long long)dma,
-			 ctrl_ctx->add_flags);
-		dma += field_size;
-		for (i = 0; i < 6; ++i) {
-			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
-				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
-				 ctrl_ctx->rsvd2[i], i);
+					ctx->ep[i].reserved[j], j);
 			dma += field_size;
 		}
-
-		if (csz)
-			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
 	}
-
-	slot_ctx = xhci_get_slot_ctx(xhci, ctx);
-	xhci_dbg_slot_ctx(xhci, ctx);
-	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
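Throughout this file the restored code reads 64-bit registers and ring fields as two u32 halves. The composition is always the same pattern; a minimal sketch (assuming the driver-internal xhci_readl() helper and struct xhci_hcd from the tree's xhci.h):

#include <linux/types.h>

/* Rebuild one 64-bit value from paired 32-bit reads, matching the
 * x[0] + ((u64) x[1] << 32) pattern used in the hunks above. */
static inline u64 demo_read_paired(struct xhci_hcd *xhci, u32 __iomem *reg)
{
	u64 lo = xhci_readl(xhci, &reg[0]);	/* low 32 bits */
	u64 hi = xhci_readl(xhci, &reg[1]);	/* high 32 bits */

	return lo + (hi << 32);
}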
diff --git a/trunk/drivers/usb/host/xhci-hcd.c b/trunk/drivers/usb/host/xhci-hcd.c
index 816c39caca1c..dba3e07ccd09 100644
--- a/trunk/drivers/usb/host/xhci-hcd.c
+++ b/trunk/drivers/usb/host/xhci-hcd.c
@@ -103,10 +103,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	u32 state;
 
 	state = xhci_readl(xhci, &xhci->op_regs->status);
-	if ((state & STS_HALT) == 0) {
-		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
-		return 0;
-	}
+	BUG_ON((state & STS_HALT) == 0);
 
 	xhci_dbg(xhci, "// Reset the HC\n");
 	command = xhci_readl(xhci, &xhci->op_regs->command);
@@ -229,7 +226,6 @@ int xhci_init(struct usb_hcd *hcd)
 static void xhci_work(struct xhci_hcd *xhci)
 {
 	u32 temp;
-	u64 temp_64;
 
 	/*
 	 * Clear the op reg interrupt status first,
@@ -252,9 +248,9 @@ static void xhci_work(struct xhci_hcd *xhci)
 	/* FIXME this should be a delayed service routine that clears the EHB */
 	xhci_handle_event(xhci);
 
-	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
-	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+	/* Clear the event handler busy flag; the event ring should be empty. */
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
 	/* Flush posted writes -- FIXME is this necessary? */
 	xhci_readl(xhci, &xhci->ir_set->irq_pending);
 }
@@ -270,34 +266,19 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	u32 temp, temp2;
-	union xhci_trb *trb;
 
 	spin_lock(&xhci->lock);
-	trb = xhci->event_ring->dequeue;
 	/* Check if the xHC generated the interrupt, or the irq is shared */
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	if (temp == 0xffffffff && temp2 == 0xffffffff)
-		goto hw_died;
-
 	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
 		spin_unlock(&xhci->lock);
 		return IRQ_NONE;
 	}
-	xhci_dbg(xhci, "op reg status = %08x\n", temp);
-	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
-	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
-	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
-			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
-			lower_32_bits(trb->link.segment_ptr),
-			upper_32_bits(trb->link.segment_ptr),
-			(unsigned int) trb->link.intr_target,
-			(unsigned int) trb->link.control);
 
 	if (temp & STS_FATAL) {
 		xhci_warn(xhci, "WARNING: Host System Error\n");
 		xhci_halt(xhci);
-hw_died:
 		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
 		spin_unlock(&xhci->lock);
 		return -ESHUTDOWN;
@@ -314,7 +295,6 @@ void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
-	u64 temp_64;
 	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
 	int i, j;
 
@@ -331,9 +311,9 @@ void xhci_event_ring_work(unsigned long arg)
 	xhci_dbg(xhci, "Event ring:\n");
 	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp_64 &= ~ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
 	xhci_dbg(xhci, "Command ring:\n");
 	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -376,7 +356,6 @@ void xhci_event_ring_work(unsigned long arg)
 int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
-	u64 temp_64;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	void (*doorbell)(struct xhci_hcd *) = NULL;
 
@@ -403,20 +382,6 @@ int xhci_run(struct usb_hcd *hcd)
 	add_timer(&xhci->event_ring_timer);
 #endif
 
-	xhci_dbg(xhci, "Command ring memory map follows:\n");
-	xhci_debug_ring(xhci, xhci->cmd_ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-	xhci_dbg_cmd_ptrs(xhci);
-
-	xhci_dbg(xhci, "ERST memory map follows:\n");
-	xhci_dbg_erst(xhci, &xhci->erst);
-	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->event_ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp_64 &= ~ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
-
 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	temp &= ~ER_IRQ_INTERVAL_MASK;
@@ -441,6 +406,22 @@ int xhci_run(struct usb_hcd *hcd)
 	if (NUM_TEST_NOOPS > 0)
 		doorbell = xhci_setup_one_noop(xhci);
 
+	xhci_dbg(xhci, "Command ring memory map follows:\n");
+	xhci_debug_ring(xhci, xhci->cmd_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	xhci_dbg(xhci, "ERST memory map follows:\n");
+	xhci_dbg_erst(xhci, &xhci->erst);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
+	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
 	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
@@ -620,13 +601,10 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		goto exit;
 	}
 	if (usb_endpoint_xfer_control(&urb->ep->desc))
-		/* We have a spinlock and interrupts disabled, so we must pass
-		 * atomic context to this function, which may allocate memory.
-		 */
-		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
+		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
 				slot_id, ep_index);
 	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
 				slot_id, ep_index);
 	else
 		ret = -EINVAL;
@@ -683,12 +661,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
-	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->event_ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
-	xhci_dbg(xhci, "Endpoint ring:\n");
-	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
 
 	ep_ring->cancels_pending++;
@@ -722,9 +696,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	struct xhci_hcd *xhci;
-	struct xhci_container_ctx *in_ctx, *out_ctx;
-	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_device_control *in_ctx;
 	unsigned int last_ctx;
 	unsigned int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
@@ -752,34 +724,31 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	}
 
 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
-	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
 	/* If the HC already knows the endpoint is disabled,
 	 * or the HCD has noted it is disabled, ignore this request
 	 */
 	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
 				__func__, ep);
 		return 0;
 	}
 
-	ctrl_ctx->drop_flags |= drop_flag;
-	new_drop_flags = ctrl_ctx->drop_flags;
+	in_ctx->drop_flags |= drop_flag;
+	new_drop_flags = in_ctx->drop_flags;
 
-	ctrl_ctx->add_flags = ~drop_flag;
-	new_add_flags = ctrl_ctx->add_flags;
+	in_ctx->add_flags = ~drop_flag;
+	new_add_flags = in_ctx->add_flags;
 
-	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
-	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
+	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
 	/* Update the last valid endpoint context, if we deleted the last one */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
 	}
-	new_slot_info = slot_ctx->dev_info;
+	new_slot_info = in_ctx->slot.dev_info;
 
 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
@@ -809,22 +778,17 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	struct xhci_hcd *xhci;
-	struct xhci_container_ctx *in_ctx, *out_ctx;
+	struct xhci_device_control *in_ctx;
 	unsigned int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_slot_ctx *slot_ctx;
-	struct xhci_input_control_ctx *ctrl_ctx;
 	u32 added_ctxs;
 	unsigned int last_ctx;
 	u32 new_add_flags, new_drop_flags, new_slot_info;
 	int ret = 0;
 
 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
-	if (ret <= 0) {
-		/* So we won't queue a reset ep command for a root hub */
-		ep->hcpriv = NULL;
+	if (ret <= 0)
 		return ret;
-	}
 	xhci = hcd_to_xhci(hcd);
 
 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
@@ -846,14 +810,12 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	}
 
 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
-	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
 	/* If the HCD has already noted the endpoint is enabled,
 	 * ignore this request.
 	 */
-	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
 				__func__, ep);
 		return 0;
@@ -871,8 +833,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return -ENOMEM;
 	}
 
-	ctrl_ctx->add_flags |= added_ctxs;
-	new_add_flags = ctrl_ctx->add_flags;
+	in_ctx->add_flags |= added_ctxs;
+	new_add_flags = in_ctx->add_flags;
 
 	/* If xhci_endpoint_disable() was called for this endpoint, but the
 	 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -880,18 +842,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * descriptors.  We must drop and re-add this endpoint, so we leave the
 	 * drop flags alone.
 	 */
-	new_drop_flags = ctrl_ctx->drop_flags;
+	new_drop_flags = in_ctx->drop_flags;
 
-	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we just added one past */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
 	}
-	new_slot_info = slot_ctx->dev_info;
-
-	/* Store the usb_device pointer for later use */
-	ep->hcpriv = udev;
+	new_slot_info = in_ctx->slot.dev_info;
 
 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
 			(unsigned int) ep->desc.bEndpointAddress,
@@ -902,11 +860,9 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	return 0;
 }
 
-static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
+static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
 {
-	struct xhci_input_control_ctx *ctrl_ctx;
 	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_slot_ctx *slot_ctx;
 	int i;
 
 	/* When a device's add flag and drop flag are zero, any subsequent
@@ -914,18 +870,17 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
 	 * untouched.  Make sure we don't leave any old state in the input
 	 * endpoint contexts.
 	 */
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
-	ctrl_ctx->drop_flags = 0;
-	ctrl_ctx->add_flags = 0;
-	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
-	slot_ctx->dev_info &= ~LAST_CTX_MASK;
+	virt_dev->in_ctx->drop_flags = 0;
+	virt_dev->in_ctx->add_flags = 0;
+	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
 	/* Endpoint 0 is always valid */
-	slot_ctx->dev_info |= LAST_CTX(1);
+	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
 	for (i = 1; i < 31; ++i) {
-		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
+		ep_ctx = &virt_dev->in_ctx->ep[i];
 		ep_ctx->ep_info = 0;
 		ep_ctx->ep_info2 = 0;
-		ep_ctx->deq = 0;
+		ep_ctx->deq[0] = 0;
+		ep_ctx->deq[1] = 0;
 		ep_ctx->tx_info = 0;
 	}
 }
@@ -948,8 +903,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	unsigned long flags;
 	struct xhci_hcd *xhci;
 	struct xhci_virt_device	*virt_dev;
-	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_slot_ctx *slot_ctx;
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
 	if (ret <= 0)
@@ -965,18 +918,16 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	virt_dev = xhci->devs[udev->slot_id];
 
 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
-	ctrl_ctx->add_flags |= SLOT_FLAG;
-	ctrl_ctx->add_flags &= ~EP0_FLAG;
-	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
-	ctrl_ctx->drop_flags &= ~EP0_FLAG;
+	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
+	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
+	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
+	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
 	xhci_dbg(xhci, "New Input Control Context:\n");
-	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
-	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
-			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
+			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
+	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
 			udev->slot_id);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1031,10 +982,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	}
 
 	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
-	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
-			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
+			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
 
-	xhci_zero_in_ctx(xhci, virt_dev);
+	xhci_zero_in_ctx(virt_dev);
 	/* Free any old rings */
 	for (i = 1; i < 31; ++i) {
 		if (virt_dev->new_ep_rings[i]) {
@@ -1072,67 +1023,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 			virt_dev->new_ep_rings[i] = NULL;
 		}
 	}
-	xhci_zero_in_ctx(xhci, virt_dev);
-}
-
-/* Deal with stalled endpoints.  The core should have sent the control message
- * to clear the halt condition.  However, we need to make the xHCI hardware
- * reset its sequence number, since a device will expect a sequence number of
- * zero after the halt condition is cleared.
- * Context: in_interrupt
- */
-void xhci_endpoint_reset(struct usb_hcd *hcd,
-		struct usb_host_endpoint *ep)
-{
-	struct xhci_hcd *xhci;
-	struct usb_device *udev;
-	unsigned int ep_index;
-	unsigned long flags;
-	int ret;
-	struct xhci_dequeue_state deq_state;
-	struct xhci_ring *ep_ring;
-
-	xhci = hcd_to_xhci(hcd);
-	udev = (struct usb_device *) ep->hcpriv;
-	/* Called with a root hub endpoint (or an endpoint that wasn't added
-	 * with xhci_add_endpoint()
-	 */
-	if (!ep->hcpriv)
-		return;
-	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
-	if (!ep_ring->stopped_td) {
-		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
-				ep->desc.bEndpointAddress);
-		return;
-	}
-
-	xhci_dbg(xhci, "Queueing reset endpoint command\n");
-	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
-	/*
-	 * Can't change the ring dequeue pointer until it's transitioned to the
-	 * stopped state, which is only upon a successful reset endpoint
-	 * command.  Better hope that last command worked!
-	 */
-	if (!ret) {
-		xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
-		/* We need to move the HW's dequeue pointer past this TD,
-		 * or it will attempt to resend it on the next doorbell ring.
-		 */
-		xhci_find_new_dequeue_state(xhci, udev->slot_id,
-				ep_index, ep_ring->stopped_td, &deq_state);
-		xhci_dbg(xhci, "Queueing new dequeue state\n");
-		xhci_queue_new_dequeue_state(xhci, ep_ring,
-				udev->slot_id,
-				ep_index, &deq_state);
-		kfree(ep_ring->stopped_td);
-		xhci_ring_cmd_db(xhci);
-	}
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
-	if (ret)
-		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
+	xhci_zero_in_ctx(virt_dev);
 }
 
 /*
@@ -1229,9 +1120,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	struct xhci_virt_device *virt_dev;
 	int ret = 0;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	struct xhci_slot_ctx *slot_ctx;
-	struct xhci_input_control_ctx *ctrl_ctx;
-	u64 temp_64;
+	u32 temp;
 
 	if (!udev->slot_id) {
 		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1244,12 +1133,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	if (!udev->config)
 		xhci_setup_addressable_virt_dev(xhci, udev);
 	/* Otherwise, assume the core has the device configured how it wants */
-	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
-			udev->slot_id);
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+					udev->slot_id);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
@@ -1289,37 +1176,41 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
-		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
 		ret = -EINVAL;
 		break;
 	}
 	if (ret) {
 		return ret;
 	}
-	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
-	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
+	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
+	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
+	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
+			udev->slot_id,
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
 			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
-			(unsigned long long)
-			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
 	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
-			(unsigned long long)virt_dev->out_ctx->dma);
+			(unsigned long long)virt_dev->out_ctx_dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
 	/*
 	 * USB core uses address 1 for the roothubs, so we add one to the
 	 * address given back to us by the HC.
 	 */
-	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
-	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
+	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
 	/* Zero the input context control for later use */
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
-	ctrl_ctx->add_flags = 0;
-	ctrl_ctx->drop_flags = 0;
+	virt_dev->in_ctx->add_flags = 0;
+	virt_dev->in_ctx->drop_flags = 0;
+	/* Mirror flags in the output context for future ep enable/disable */
+	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	virt_dev->out_ctx->drop_flags = 0;
 
 	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
 	/* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1361,6 +1252,7 @@ static int __init xhci_hcd_init(void)
 	/* xhci_device_control has eight fields, and also
 	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
 	 */
+	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
 	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
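The comment removed from xhci_urb_enqueue() captures a general kernel rule: code that allocates while holding a spinlock with interrupts disabled cannot honor a caller-supplied gfp_t that might allow sleeping. A minimal sketch of the constraint (demo_queue() is hypothetical):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static int demo_queue(spinlock_t *lock, void **out, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* Must not sleep here, so GFP_KERNEL is not allowed. */
	*out = kzalloc(size, GFP_ATOMIC);
	spin_unlock_irqrestore(lock, flags);
	return *out ? 0 : -ENOMEM;
}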
diff --git a/trunk/drivers/usb/host/xhci-mem.c b/trunk/drivers/usb/host/xhci-mem.c
index e6b9a1c6002d..c8a72de1c508 100644
--- a/trunk/drivers/usb/host/xhci-mem.c
+++ b/trunk/drivers/usb/host/xhci-mem.c
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -189,63 +189,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	return 0;
 }
 
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-
-struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
-						    int type, gfp_t flags)
-{
-	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
-	if (!ctx)
-		return NULL;
-
-	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
-	ctx->type = type;
-	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
-	if (type == XHCI_CTX_TYPE_INPUT)
-		ctx->size += CTX_SIZE(xhci->hcc_params);
-
-	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
-	memset(ctx->bytes, 0, ctx->size);
-	return ctx;
-}
-
-void xhci_free_container_ctx(struct xhci_hcd *xhci,
-			     struct xhci_container_ctx *ctx)
-{
-	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
-	kfree(ctx);
-}
-
-struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
-					      struct xhci_container_ctx *ctx)
-{
-	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
-	return (struct xhci_input_control_ctx *)ctx->bytes;
-}
-
-struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
-					struct xhci_container_ctx *ctx)
-{
-	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
-		return (struct xhci_slot_ctx *)ctx->bytes;
-
-	return (struct xhci_slot_ctx *)
-		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
-}
-
-struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
-				    struct xhci_container_ctx *ctx,
-				    unsigned int ep_index)
-{
-	/* increment ep index by offset of start of ep ctx array */
-	ep_index++;
-	if (ctx->type == XHCI_CTX_TYPE_INPUT)
-		ep_index++;
-
-	return (struct xhci_ep_ctx *)
-		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
-}
-
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
@@ -257,7 +200,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;
 
 	dev = xhci->devs[slot_id];
-	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
+	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
+	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
 	if (!dev)
 		return;
 
@@ -266,10 +210,11 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 			xhci_ring_free(xhci, dev->ep_rings[i]);
 
 	if (dev->in_ctx)
-		xhci_free_container_ctx(xhci, dev->in_ctx);
+		dma_pool_free(xhci->device_pool,
+				dev->in_ctx, dev->in_ctx_dma);
 	if (dev->out_ctx)
-		xhci_free_container_ctx(xhci, dev->out_ctx);
-
+		dma_pool_free(xhci->device_pool,
+				dev->out_ctx, dev->out_ctx_dma);
 	kfree(xhci->devs[slot_id]);
 	xhci->devs[slot_id] = 0;
 }
@@ -277,6 +222,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		struct usb_device *udev, gfp_t flags)
 {
+	dma_addr_t	dma;
 	struct xhci_virt_device *dev;
 
 	/* Slot ID 0 is reserved */
@@ -290,21 +236,23 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		return 0;
 	dev = xhci->devs[slot_id];
 
-	/* Allocate the (output) device context that will be used in the HC. */
-	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+	/* Allocate the (output) device context that will be used in the HC */
+	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
 	if (!dev->out_ctx)
 		goto fail;
-
+	dev->out_ctx_dma = dma;
 	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dev->out_ctx->dma);
+			(unsigned long long)dma);
+	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
 
 	/* Allocate the (input) device context for address device command */
-	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
+	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
 	if (!dev->in_ctx)
 		goto fail;
-
+	dev->in_ctx_dma = dma;
 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dev->in_ctx->dma);
+			(unsigned long long)dma);
+	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
 
 	/* Allocate endpoint 0 ring */
 	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -313,12 +261,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
 	init_completion(&dev->cmd_completion);
 
-	/* Point to output device context in dcbaa. */
-	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
+	/*
+	 * Point to output device context in dcbaa; skip the output control
+	 * context, which is eight 32 bit fields (or 32 bytes long)
	 */
+	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+		(u32) dev->out_ctx_dma + (32);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			&xhci->dcbaa->dev_context_ptrs[slot_id],
-			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
+			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			(unsigned long long)dev->out_ctx_dma);
+	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
 
 	return 1;
 fail:
@@ -332,8 +285,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx	*ep0_ctx;
 	struct usb_device	*top_dev;
-	struct xhci_slot_ctx    *slot_ctx;
-	struct xhci_input_control_ctx *ctrl_ctx;
 
 	dev = xhci->devs[udev->slot_id];
 	/* Slot ID 0 is reserved */
@@ -342,29 +293,27 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 				udev->slot_id);
 		return -EINVAL;
 	}
-	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
-	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
+	ep0_ctx = &dev->in_ctx->ep[0];
 
 	/* 2) New slot context and endpoint 0 context are valid*/
-	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	slot_ctx->dev_info |= LAST_CTX(1);
+	dev->in_ctx->slot.dev_info |= LAST_CTX(1);
 
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		slot_ctx->dev_info |= (u32) udev->route;
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
+		dev->in_ctx->slot.dev_info |= (u32) udev->route;
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
 		break;
 	case USB_SPEED_HIGH:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
 		break;
 	case USB_SPEED_FULL:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
 		break;
 	case USB_SPEED_LOW:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
 		break;
 	case USB_SPEED_VARIABLE:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -378,7 +327,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
 		/* Found device below root hub */;
-	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
 	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
 
 	/* Is this a LS/FS device under a HS hub? */
@@ -388,8 +337,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	 */
 	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
-		slot_ctx->tt_info = udev->tt->hub->slot_id;
-		slot_ctx->tt_info |= udev->ttport << 8;
+		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
+		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
 	}
 	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -411,9 +360,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	ep0_ctx->ep_info2 |= MAX_BURST(0);
 	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
 
-	ep0_ctx->deq =
+	ep0_ctx->deq[0] =
 		dev->ep_rings[0]->first_seg->dma;
-	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
+	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
+	ep0_ctx->deq[1] = 0;
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -520,26 +470,25 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	unsigned int max_burst;
 
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
 
 	/* Set up the endpoint ring */
 	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
 	if (!virt_dev->new_ep_rings[ep_index])
 		return -ENOMEM;
 	ep_ring = virt_dev->new_ep_rings[ep_index];
-	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
+	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
+	ep_ctx->deq[1] = 0;
 
 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
 
 	/* FIXME dig Mult and streams info out of ep companion desc */
 
-	/* Allow 3 retries for everything but isoc;
-	 * error count = 0 means infinite retries.
-	 */
+	/* Allow 3 retries for everything but isoc */
 	if (!usb_endpoint_xfer_isoc(&ep->desc))
 		ep_ctx->ep_info2 = ERROR_COUNT(3);
 	else
-		ep_ctx->ep_info2 = ERROR_COUNT(1);
+		ep_ctx->ep_info2 = ERROR_COUNT(0);
 
 	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
 
@@ -549,12 +498,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		max_packet = ep->desc.wMaxPacketSize;
 		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
 		/* dig out max burst from ep companion desc */
-		if (!ep->ss_ep_comp) {
-			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
-			max_packet = 0;
-		} else {
-			max_packet = ep->ss_ep_comp->desc.bMaxBurst;
-		}
+		max_packet = ep->ss_ep_comp->desc.bMaxBurst;
 		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
 		break;
 	case USB_SPEED_HIGH:
@@ -587,114 +531,18 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
 	struct xhci_ep_ctx *ep_ctx;
 
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
 
 	ep_ctx->ep_info = 0;
 	ep_ctx->ep_info2 = 0;
-	ep_ctx->deq = 0;
+	ep_ctx->deq[0] = 0;
+	ep_ctx->deq[1] = 0;
 	ep_ctx->tx_info = 0;
 	/* Don't free the endpoint ring until the set interface or configuration
 	 * request succeeds.
 	 */
 }
 
-/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
-static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
-{
-	int i;
-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
-	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
-
-	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
-
-	if (!num_sp)
-		return 0;
-
-	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
-	if (!xhci->scratchpad)
-		goto fail_sp;
-
-	xhci->scratchpad->sp_array =
-		pci_alloc_consistent(to_pci_dev(dev),
-				     num_sp * sizeof(u64),
-				     &xhci->scratchpad->sp_dma);
-	if (!xhci->scratchpad->sp_array)
-		goto fail_sp2;
-
-	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
-	if (!xhci->scratchpad->sp_buffers)
-		goto fail_sp3;
-
-	xhci->scratchpad->sp_dma_buffers =
-		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
-
-	if (!xhci->scratchpad->sp_dma_buffers)
-		goto fail_sp4;
-
-	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
-	for (i = 0; i < num_sp; i++) {
-		dma_addr_t dma;
-		void *buf = pci_alloc_consistent(to_pci_dev(dev),
-						 xhci->page_size, &dma);
-		if (!buf)
-			goto fail_sp5;
-
-		xhci->scratchpad->sp_array[i] = dma;
-		xhci->scratchpad->sp_buffers[i] = buf;
-		xhci->scratchpad->sp_dma_buffers[i] = dma;
-	}
-
-	return 0;
-
- fail_sp5:
-	for (i = i - 1; i >= 0; i--) {
-		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
-				    xhci->scratchpad->sp_buffers[i],
-				    xhci->scratchpad->sp_dma_buffers[i]);
-	}
-	kfree(xhci->scratchpad->sp_dma_buffers);
-
- fail_sp4:
-	kfree(xhci->scratchpad->sp_buffers);
-
- fail_sp3:
-	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
-			    xhci->scratchpad->sp_array,
-			    xhci->scratchpad->sp_dma);
-
- fail_sp2:
-	kfree(xhci->scratchpad);
-	xhci->scratchpad = NULL;
-
- fail_sp:
-	return -ENOMEM;
-}
-
-static void scratchpad_free(struct xhci_hcd *xhci)
-{
-	int num_sp;
-	int i;
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-
-	if (!xhci->scratchpad)
-		return;
-
-	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
-
-	for (i = 0; i < num_sp; i++) {
-		pci_free_consistent(pdev, xhci->page_size,
-				    xhci->scratchpad->sp_buffers[i],
-				    xhci->scratchpad->sp_dma_buffers[i]);
-	}
-	kfree(xhci->scratchpad->sp_dma_buffers);
-	kfree(xhci->scratchpad->sp_buffers);
-	pci_free_consistent(pdev, num_sp * sizeof(u64),
-			    xhci->scratchpad->sp_array,
-			    xhci->scratchpad->sp_dma);
-	kfree(xhci->scratchpad);
-	xhci->scratchpad = NULL;
-}
-
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
@@ -703,8 +551,10 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
-	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		pci_free_consistent(pdev, size,
@@ -716,7 +566,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
 
-	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
+	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
+	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -735,7 +586,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->device_pool = NULL;
 	xhci_dbg(xhci, "Freed device context pool\n");
 
-	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
+	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
+	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
@@ -743,7 +595,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
-	scratchpad_free(xhci);
 }
 
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
@@ -751,7 +602,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	dma_addr_t	dma;
 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int	val, val2;
-	u64		val_64;
 	struct xhci_segment	*seg;
 	u32 page_size;
 	int i;
@@ -797,7 +647,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dma = dma;
 	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
+	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
 
 	/*
 	 * Initialize the ring segment pool.  The ring must be a contiguous
@@ -807,10 +658,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 */
 	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
 			SEGMENT_SIZE, 64, xhci->page_size);
-	/* See Table 46 and Note on Figure 55 */
+	/* FIXME support 64-byte contexts */
 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
-			2112, 64, xhci->page_size);
+			sizeof(struct xhci_device_control),
+			64, xhci->page_size);
 	if (!xhci->segment_pool || !xhci->device_pool)
 		goto fail;
 
@@ -823,12 +675,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
-	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
-		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+	val = (val & ~CMD_RING_ADDR_MASK) |
+		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
-	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
+	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
+	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
+	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
 	xhci_dbg_cmd_ptrs(xhci);
 
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -868,7 +722,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = seg->dma;
+		entry->seg_addr[0] = seg->dma;
+		entry->seg_addr[1] = 0;
 		entry->seg_size = TRBS_PER_SEGMENT;
 		entry->rsvd = 0;
 		seg = seg->next;
@@ -886,10 +741,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set the segment table base address */
 	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
 			(unsigned long long)xhci->erst.erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
+	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
+	val &= ERST_PTR_MASK;
+	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
+	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
 
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
@@ -905,11 +761,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	for (i = 0; i < MAX_HC_SLOTS; ++i)
 		xhci->devs[i] = 0;
 
-	if (scratchpad_alloc(xhci, flags))
-		goto fail;
-
 	return 0;
-
 fail:
 	xhci_warn(xhci, "Couldn't initialize memory\n");
 	xhci_mem_cleanup(xhci);
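With 32-bit array cells, each device slot consumes two consecutive DCBAA entries: index 2*slot_id holds the low half of the output context DMA address and 2*slot_id+1 the high half (written as zero in the hunks above). A sketch of the general layout (lower_32_bits()/upper_32_bits() are standard kernel helpers; demo_set_dcbaa() is hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_set_dcbaa(u32 *dev_context_ptrs, int slot_id, u64 dma)
{
	dev_context_ptrs[2 * slot_id]     = lower_32_bits(dma);
	dev_context_ptrs[2 * slot_id + 1] = upper_32_bits(dma);
}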
diff --git a/trunk/drivers/usb/host/xhci-pci.c b/trunk/drivers/usb/host/xhci-pci.c
index 592fe7e623f7..1462709e26c0 100644
--- a/trunk/drivers/usb/host/xhci-pci.c
+++ b/trunk/drivers/usb/host/xhci-pci.c
@@ -117,7 +117,6 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.free_dev =		xhci_free_dev,
 	.add_endpoint =		xhci_add_endpoint,
 	.drop_endpoint =	xhci_drop_endpoint,
-	.endpoint_reset =	xhci_endpoint_reset,
 	.check_bandwidth =	xhci_check_bandwidth,
 	.reset_bandwidth =	xhci_reset_bandwidth,
 	.address_device =	xhci_address_device,
*/ - ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); - ctrl_ctx->drop_flags = 0; - ctrl_ctx->add_flags = 0; - slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); - slot_ctx->dev_info &= ~LAST_CTX_MASK; + virt_dev->in_ctx->drop_flags = 0; + virt_dev->in_ctx->add_flags = 0; + virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK; /* Endpoint 0 is always valid */ - slot_ctx->dev_info |= LAST_CTX(1); + virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1); for (i = 1; i < 31; ++i) { - ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); + ep_ctx = &virt_dev->in_ctx->ep[i]; ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; - ep_ctx->deq = 0; + ep_ctx->deq[0] = 0; + ep_ctx->deq[1] = 0; ep_ctx->tx_info = 0; } } @@ -948,8 +903,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) unsigned long flags; struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_slot_ctx *slot_ctx; ret = xhci_check_args(hcd, udev, NULL, 0, __func__); if (ret <= 0) @@ -965,18 +918,16 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) virt_dev = xhci->devs[udev->slot_id]; /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); - ctrl_ctx->add_flags |= SLOT_FLAG; - ctrl_ctx->add_flags &= ~EP0_FLAG; - ctrl_ctx->drop_flags &= ~SLOT_FLAG; - ctrl_ctx->drop_flags &= ~EP0_FLAG; + virt_dev->in_ctx->add_flags |= SLOT_FLAG; + virt_dev->in_ctx->add_flags &= ~EP0_FLAG; + virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG; + virt_dev->in_ctx->drop_flags &= ~EP0_FLAG; xhci_dbg(xhci, "New Input Control Context:\n"); - slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, - LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, + LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma, + ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id); if (ret < 0) { spin_unlock_irqrestore(&xhci->lock, flags); @@ -1031,10 +982,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) } xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, - LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, + LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); - xhci_zero_in_ctx(xhci, virt_dev); + xhci_zero_in_ctx(virt_dev); /* Free any old rings */ for (i = 1; i < 31; ++i) { if (virt_dev->new_ep_rings[i]) { @@ -1072,67 +1023,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) virt_dev->new_ep_rings[i] = NULL; } } - xhci_zero_in_ctx(xhci, virt_dev); -} - -/* Deal with stalled endpoints. The core should have sent the control message - * to clear the halt condition. However, we need to make the xHCI hardware - * reset its sequence number, since a device will expect a sequence number of - * zero after the halt condition is cleared. 
- * Context: in_interrupt - */ -void xhci_endpoint_reset(struct usb_hcd *hcd, - struct usb_host_endpoint *ep) -{ - struct xhci_hcd *xhci; - struct usb_device *udev; - unsigned int ep_index; - unsigned long flags; - int ret; - struct xhci_dequeue_state deq_state; - struct xhci_ring *ep_ring; - - xhci = hcd_to_xhci(hcd); - udev = (struct usb_device *) ep->hcpriv; - /* Called with a root hub endpoint (or an endpoint that wasn't added - * with xhci_add_endpoint() - */ - if (!ep->hcpriv) - return; - ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index]; - if (!ep_ring->stopped_td) { - xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", - ep->desc.bEndpointAddress); - return; - } - - xhci_dbg(xhci, "Queueing reset endpoint command\n"); - spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); - /* - * Can't change the ring dequeue pointer until it's transitioned to the - * stopped state, which is only upon a successful reset endpoint - * command. Better hope that last command worked! - */ - if (!ret) { - xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); - /* We need to move the HW's dequeue pointer past this TD, - * or it will attempt to resend it on the next doorbell ring. - */ - xhci_find_new_dequeue_state(xhci, udev->slot_id, - ep_index, ep_ring->stopped_td, &deq_state); - xhci_dbg(xhci, "Queueing new dequeue state\n"); - xhci_queue_new_dequeue_state(xhci, ep_ring, - udev->slot_id, - ep_index, &deq_state); - kfree(ep_ring->stopped_td); - xhci_ring_cmd_db(xhci); - } - spin_unlock_irqrestore(&xhci->lock, flags); - - if (ret) - xhci_warn(xhci, "FIXME allocate a new ring segment\n"); + xhci_zero_in_ctx(virt_dev); } /* @@ -1229,9 +1120,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_virt_device *virt_dev; int ret = 0; struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct xhci_slot_ctx *slot_ctx; - struct xhci_input_control_ctx *ctrl_ctx; - u64 temp_64; + u32 temp; if (!udev->slot_id) { xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); @@ -1244,12 +1133,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) if (!udev->config) xhci_setup_addressable_virt_dev(xhci, udev); /* Otherwise, assume the core has the device configured how it wants */ - xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, - udev->slot_id); + ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma, + udev->slot_id); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); @@ -1289,37 +1176,41 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) default: xhci_err(xhci, "ERROR: unexpected command completion " "code 0x%x.\n", virt_dev->cmd_status); - xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); ret = -EINVAL; break; } if (ret) { return ret; } - temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); - xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); - xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", + temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); + xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); + temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); + xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp); + xhci_dbg(xhci, "Slot ID 
%d dcbaa entry[0] @%p = %#08x\n", + udev->slot_id, + &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id], + xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]); + xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n", udev->slot_id, - &xhci->dcbaa->dev_context_ptrs[udev->slot_id], - (unsigned long long) - xhci->dcbaa->dev_context_ptrs[udev->slot_id]); + &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], + xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", - (unsigned long long)virt_dev->out_ctx->dma); + (unsigned long long)virt_dev->out_ctx_dma); xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); /* * USB core uses address 1 for the roothubs, so we add one to the * address given back to us by the HC. */ - slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); - udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; + udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; /* Zero the input context control for later use */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); - ctrl_ctx->add_flags = 0; - ctrl_ctx->drop_flags = 0; + virt_dev->in_ctx->add_flags = 0; + virt_dev->in_ctx->drop_flags = 0; + /* Mirror flags in the output context for future ep enable/disable */ + virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG; + virt_dev->out_ctx->drop_flags = 0; xhci_dbg(xhci, "Device address = %d\n", udev->devnum); /* XXX Meh, not sure if anyone else but choose_address uses this. */ @@ -1361,6 +1252,7 @@ static int __init xhci_hcd_init(void) /* xhci_device_control has eight fields, and also * embeds one xhci_slot_ctx and 31 xhci_ep_ctx */ + BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8); BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); diff --git a/trunk/drivers/usb/host/xhci-mem.c b/trunk/drivers/usb/host/xhci-mem.c index e6b9a1c6002d..c8a72de1c508 100644 --- a/trunk/drivers/usb/host/xhci-mem.c +++ b/trunk/drivers/usb/host/xhci-mem.c @@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, return; prev->next = next; if (link_trbs) { - prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; + prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; @@ -189,63 +189,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, return 0; } -#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) - -struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, - int type, gfp_t flags) -{ - struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); - if (!ctx) - return NULL; - - BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); - ctx->type = type; - ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 
2048 : 1024; - if (type == XHCI_CTX_TYPE_INPUT) - ctx->size += CTX_SIZE(xhci->hcc_params); - - ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); - memset(ctx->bytes, 0, ctx->size); - return ctx; -} - -void xhci_free_container_ctx(struct xhci_hcd *xhci, - struct xhci_container_ctx *ctx) -{ - dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); - kfree(ctx); -} - -struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, - struct xhci_container_ctx *ctx) -{ - BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); - return (struct xhci_input_control_ctx *)ctx->bytes; -} - -struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, - struct xhci_container_ctx *ctx) -{ - if (ctx->type == XHCI_CTX_TYPE_DEVICE) - return (struct xhci_slot_ctx *)ctx->bytes; - - return (struct xhci_slot_ctx *) - (ctx->bytes + CTX_SIZE(xhci->hcc_params)); -} - -struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, - struct xhci_container_ctx *ctx, - unsigned int ep_index) -{ - /* increment ep index by offset of start of ep ctx array */ - ep_index++; - if (ctx->type == XHCI_CTX_TYPE_INPUT) - ep_index++; - - return (struct xhci_ep_ctx *) - (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); -} - /* All the xhci_tds in the ring's TD list should be freed at this point */ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) { @@ -257,7 +200,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) return; dev = xhci->devs[slot_id]; - xhci->dcbaa->dev_context_ptrs[slot_id] = 0; + xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; + xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; if (!dev) return; @@ -266,10 +210,11 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) xhci_ring_free(xhci, dev->ep_rings[i]); if (dev->in_ctx) - xhci_free_container_ctx(xhci, dev->in_ctx); + dma_pool_free(xhci->device_pool, + dev->in_ctx, dev->in_ctx_dma); if (dev->out_ctx) - xhci_free_container_ctx(xhci, dev->out_ctx); - + dma_pool_free(xhci->device_pool, + dev->out_ctx, dev->out_ctx_dma); kfree(xhci->devs[slot_id]); xhci->devs[slot_id] = 0; } @@ -277,6 +222,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags) { + dma_addr_t dma; struct xhci_virt_device *dev; /* Slot ID 0 is reserved */ @@ -290,21 +236,23 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, return 0; dev = xhci->devs[slot_id]; - /* Allocate the (output) device context that will be used in the HC. 
*/ - dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); + /* Allocate the (output) device context that will be used in the HC */ + dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); if (!dev->out_ctx) goto fail; - + dev->out_ctx_dma = dma; xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, - (unsigned long long)dev->out_ctx->dma); + (unsigned long long)dma); + memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); /* Allocate the (input) device context for address device command */ - dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); + dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); if (!dev->in_ctx) goto fail; - + dev->in_ctx_dma = dma; xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, - (unsigned long long)dev->in_ctx->dma); + (unsigned long long)dma); + memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); /* Allocate endpoint 0 ring */ dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); @@ -313,12 +261,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, init_completion(&dev->cmd_completion); - /* Point to output device context in dcbaa. */ - xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; + /* + * Point to output device context in dcbaa; skip the output control + * context, which is eight 32 bit fields (or 32 bytes long) + */ + xhci->dcbaa->dev_context_ptrs[2*slot_id] = + (u32) dev->out_ctx_dma + (32); xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", slot_id, - &xhci->dcbaa->dev_context_ptrs[slot_id], - (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); + &xhci->dcbaa->dev_context_ptrs[2*slot_id], + (unsigned long long)dev->out_ctx_dma); + xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; return 1; fail: @@ -332,8 +285,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud struct xhci_virt_device *dev; struct xhci_ep_ctx *ep0_ctx; struct usb_device *top_dev; - struct xhci_slot_ctx *slot_ctx; - struct xhci_input_control_ctx *ctrl_ctx; dev = xhci->devs[udev->slot_id]; /* Slot ID 0 is reserved */ @@ -342,29 +293,27 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud udev->slot_id); return -EINVAL; } - ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); - ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); - slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); + ep0_ctx = &dev->in_ctx->ep[0]; /* 2) New slot context and endpoint 0 context are valid*/ - ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; + dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; /* 3) Only the control endpoint is valid - one endpoint context */ - slot_ctx->dev_info |= LAST_CTX(1); + dev->in_ctx->slot.dev_info |= LAST_CTX(1); switch (udev->speed) { case USB_SPEED_SUPER: - slot_ctx->dev_info |= (u32) udev->route; - slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; + dev->in_ctx->slot.dev_info |= (u32) udev->route; + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; break; case USB_SPEED_HIGH: - slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; break; case USB_SPEED_FULL: - slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; break; case USB_SPEED_LOW: - slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; break; case USB_SPEED_VARIABLE: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); @@ -378,7 +327,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud for 
(top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) /* Found device below root hub */; - slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); + dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); /* Is this a LS/FS device under a HS hub? */ @@ -388,8 +337,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud */ if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && udev->tt) { - slot_ctx->tt_info = udev->tt->hub->slot_id; - slot_ctx->tt_info |= udev->ttport << 8; + dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; + dev->in_ctx->slot.tt_info |= udev->ttport << 8; } xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); @@ -411,9 +360,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ep0_ctx->ep_info2 |= MAX_BURST(0); ep0_ctx->ep_info2 |= ERROR_COUNT(3); - ep0_ctx->deq = + ep0_ctx->deq[0] = dev->ep_rings[0]->first_seg->dma; - ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; + ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; + ep0_ctx->deq[1] = 0; /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ @@ -520,26 +470,25 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, unsigned int max_burst; ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + ep_ctx = &virt_dev->in_ctx->ep[ep_index]; /* Set up the endpoint ring */ virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); if (!virt_dev->new_ep_rings[ep_index]) return -ENOMEM; ep_ring = virt_dev->new_ep_rings[ep_index]; - ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; + ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; + ep_ctx->deq[1] = 0; ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); /* FIXME dig Mult and streams info out of ep companion desc */ - /* Allow 3 retries for everything but isoc; - * error count = 0 means infinite retries. - */ + /* Allow 3 retries for everything but isoc */ if (!usb_endpoint_xfer_isoc(&ep->desc)) ep_ctx->ep_info2 = ERROR_COUNT(3); else - ep_ctx->ep_info2 = ERROR_COUNT(1); + ep_ctx->ep_info2 = ERROR_COUNT(0); ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); @@ -549,12 +498,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, max_packet = ep->desc.wMaxPacketSize; ep_ctx->ep_info2 |= MAX_PACKET(max_packet); /* dig out max burst from ep companion desc */ - if (!ep->ss_ep_comp) { - xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); - max_packet = 0; - } else { - max_packet = ep->ss_ep_comp->desc.bMaxBurst; - } + max_packet = ep->ss_ep_comp->desc.bMaxBurst; ep_ctx->ep_info2 |= MAX_BURST(max_packet); break; case USB_SPEED_HIGH: @@ -587,114 +531,18 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep_ctx; ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + ep_ctx = &virt_dev->in_ctx->ep[ep_index]; ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; - ep_ctx->deq = 0; + ep_ctx->deq[0] = 0; + ep_ctx->deq[1] = 0; ep_ctx->tx_info = 0; /* Don't free the endpoint ring until the set interface or configuration * request succeeds. */ } -/* Set up the scratchpad buffer array and scratchpad buffers, if needed. 
*/ -static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) -{ - int i; - struct device *dev = xhci_to_hcd(xhci)->self.controller; - int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); - - xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); - - if (!num_sp) - return 0; - - xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); - if (!xhci->scratchpad) - goto fail_sp; - - xhci->scratchpad->sp_array = - pci_alloc_consistent(to_pci_dev(dev), - num_sp * sizeof(u64), - &xhci->scratchpad->sp_dma); - if (!xhci->scratchpad->sp_array) - goto fail_sp2; - - xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); - if (!xhci->scratchpad->sp_buffers) - goto fail_sp3; - - xhci->scratchpad->sp_dma_buffers = - kzalloc(sizeof(dma_addr_t) * num_sp, flags); - - if (!xhci->scratchpad->sp_dma_buffers) - goto fail_sp4; - - xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; - for (i = 0; i < num_sp; i++) { - dma_addr_t dma; - void *buf = pci_alloc_consistent(to_pci_dev(dev), - xhci->page_size, &dma); - if (!buf) - goto fail_sp5; - - xhci->scratchpad->sp_array[i] = dma; - xhci->scratchpad->sp_buffers[i] = buf; - xhci->scratchpad->sp_dma_buffers[i] = dma; - } - - return 0; - - fail_sp5: - for (i = i - 1; i >= 0; i--) { - pci_free_consistent(to_pci_dev(dev), xhci->page_size, - xhci->scratchpad->sp_buffers[i], - xhci->scratchpad->sp_dma_buffers[i]); - } - kfree(xhci->scratchpad->sp_dma_buffers); - - fail_sp4: - kfree(xhci->scratchpad->sp_buffers); - - fail_sp3: - pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), - xhci->scratchpad->sp_array, - xhci->scratchpad->sp_dma); - - fail_sp2: - kfree(xhci->scratchpad); - xhci->scratchpad = NULL; - - fail_sp: - return -ENOMEM; -} - -static void scratchpad_free(struct xhci_hcd *xhci) -{ - int num_sp; - int i; - struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); - - if (!xhci->scratchpad) - return; - - num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); - - for (i = 0; i < num_sp; i++) { - pci_free_consistent(pdev, xhci->page_size, - xhci->scratchpad->sp_buffers[i], - xhci->scratchpad->sp_dma_buffers[i]); - } - kfree(xhci->scratchpad->sp_dma_buffers); - kfree(xhci->scratchpad->sp_buffers); - pci_free_consistent(pdev, num_sp * sizeof(u64), - xhci->scratchpad->sp_array, - xhci->scratchpad->sp_dma); - kfree(xhci->scratchpad); - xhci->scratchpad = NULL; -} - void xhci_mem_cleanup(struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); @@ -703,8 +551,10 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) /* Free the Event Ring Segment Table and the actual Event Ring */ xhci_writel(xhci, 0, &xhci->ir_set->erst_size); - xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); - xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); + xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); + xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); + xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); + xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); if (xhci->erst.entries) pci_free_consistent(pdev, size, @@ -716,7 +566,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->event_ring = NULL; xhci_dbg(xhci, "Freed event ring\n"); - xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); + xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); + xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]); if (xhci->cmd_ring) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; @@ -735,7 +586,8 @@ void xhci_mem_cleanup(struct xhci_hcd 
*xhci) xhci->device_pool = NULL; xhci_dbg(xhci, "Freed device context pool\n"); - xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); + xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); + xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); if (xhci->dcbaa) pci_free_consistent(pdev, sizeof(*xhci->dcbaa), xhci->dcbaa, xhci->dcbaa->dma); @@ -743,7 +595,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->page_size = 0; xhci->page_shift = 0; - scratchpad_free(xhci); } int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) @@ -751,7 +602,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) dma_addr_t dma; struct device *dev = xhci_to_hcd(xhci)->self.controller; unsigned int val, val2; - u64 val_64; struct xhci_segment *seg; u32 page_size; int i; @@ -797,7 +647,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->dcbaa->dma = dma; xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); - xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); + xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); + xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); /* * Initialize the ring segment pool. The ring must be a contiguous @@ -807,10 +658,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, SEGMENT_SIZE, 64, xhci->page_size); - /* See Table 46 and Note on Figure 55 */ + /* FIXME support 64-byte contexts */ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, - 2112, 64, xhci->page_size); + sizeof(struct xhci_device_control), + 64, xhci->page_size); if (!xhci->segment_pool || !xhci->device_pool) goto fail; @@ -823,12 +675,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) (unsigned long long)xhci->cmd_ring->first_seg->dma); /* Set the address in the Command Ring Control register */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | + val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); + val = (val & ~CMD_RING_ADDR_MASK) | + (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | xhci->cmd_ring->cycle_state; - xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); + xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); + xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n"); + xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]); xhci_dbg_cmd_ptrs(xhci); val = xhci_readl(xhci, &xhci->cap_regs->db_off); @@ -868,7 +722,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* set ring base address and size for each segment table entry */ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { struct xhci_erst_entry *entry = &xhci->erst.entries[val]; - entry->seg_addr = seg->dma; + entry->seg_addr[0] = seg->dma; + entry->seg_addr[1] = 0; entry->seg_size = TRBS_PER_SEGMENT; entry->rsvd = 0; seg = seg->next; @@ -886,10 +741,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* set the segment table base address */ xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", (unsigned long long)xhci->erst.erst_dma_addr); - val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); - val_64 &= ERST_PTR_MASK; - val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); - xhci_write_64(xhci, 
val_64, &xhci->ir_set->erst_base); + val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); + val &= ERST_PTR_MASK; + val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); + xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); + xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); @@ -905,11 +761,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) for (i = 0; i < MAX_HC_SLOTS; ++i) xhci->devs[i] = 0; - if (scratchpad_alloc(xhci, flags)) - goto fail; - return 0; - fail: xhci_warn(xhci, "Couldn't initialize memory\n"); xhci_mem_cleanup(xhci); diff --git a/trunk/drivers/usb/host/xhci-pci.c b/trunk/drivers/usb/host/xhci-pci.c index 592fe7e623f7..1462709e26c0 100644 --- a/trunk/drivers/usb/host/xhci-pci.c +++ b/trunk/drivers/usb/host/xhci-pci.c @@ -117,7 +117,6 @@ static const struct hc_driver xhci_pci_hc_driver = { .free_dev = xhci_free_dev, .add_endpoint = xhci_add_endpoint, .drop_endpoint = xhci_drop_endpoint, - .endpoint_reset = xhci_endpoint_reset, .check_bandwidth = xhci_check_bandwidth, .reset_bandwidth = xhci_reset_bandwidth, .address_device = xhci_address_device, diff --git a/trunk/drivers/usb/host/xhci-ring.c b/trunk/drivers/usb/host/xhci-ring.c index aa88a067148b..02d81985c454 100644 --- a/trunk/drivers/usb/host/xhci-ring.c +++ b/trunk/drivers/usb/host/xhci-ring.c @@ -135,7 +135,6 @@ static void next_trb(struct xhci_hcd *xhci, static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) { union xhci_trb *next = ++(ring->dequeue); - unsigned long long addr; ring->deq_updates++; /* Update the dequeue pointer further if that was a link TRB or we're at @@ -153,13 +152,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer ring->dequeue = ring->deq_seg->trbs; next = ring->dequeue; } - addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); - if (ring == xhci->event_ring) - xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr); - else if (ring == xhci->cmd_ring) - xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr); - else - xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr); } /* @@ -179,7 +171,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer { u32 chain; union xhci_trb *next; - unsigned long long addr; chain = ring->enqueue->generic.field[3] & TRB_CHAIN; next = ++(ring->enqueue); @@ -213,13 +204,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer ring->enqueue = ring->enq_seg->trbs; next = ring->enqueue; } - addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); - if (ring == xhci->event_ring) - xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr); - else if (ring == xhci->cmd_ring) - xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr); - else - xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr); } /* @@ -253,7 +237,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, void xhci_set_hc_event_deq(struct xhci_hcd *xhci) { - u64 temp; + u32 temp; dma_addr_t deq; deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, @@ -262,15 +246,13 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci) xhci_warn(xhci, "WARN something wrong with SW event ring " "dequeue ptr.\n"); /* Update HC event ring dequeue pointer */ - temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); temp &= ERST_PTR_MASK; - /* Don't clear the EHB bit (which is RW1C) because - * there might be 
more events to service. - */ - temp &= ~ERST_EHB; - xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n"); - xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, - &xhci->ir_set->erst_dequeue); + if (!in_interrupt()) + xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); + xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); + xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, + &xhci->ir_set->erst_dequeue[0]); } /* Ring the host controller doorbell after placing a command on the ring */ @@ -297,8 +279,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, /* Don't ring the doorbell for this endpoint if there are pending * cancellations because the we don't want to interrupt processing. */ - if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING) - && !(ep_ring->state & EP_HALTED)) { + if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) { field = xhci_readl(xhci, db_addr) & DB_MASK; xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); /* Flush PCI posted writes - FIXME Matthew Wilcox says this @@ -335,6 +316,12 @@ static struct xhci_segment *find_trb_seg( return cur_seg; } +struct dequeue_state { + struct xhci_segment *new_deq_seg; + union xhci_trb *new_deq_ptr; + int new_cycle_state; +}; + /* * Move the xHC's endpoint ring dequeue pointer past cur_td. * Record the new state of the xHC's endpoint ring dequeue segment, @@ -349,30 +336,24 @@ static struct xhci_segment *find_trb_seg( * - Finally we move the dequeue state one TRB further, toggling the cycle bit * if we've moved it past a link TRB with the toggle cycle bit set. */ -void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, +static void find_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, - struct xhci_td *cur_td, struct xhci_dequeue_state *state) + struct xhci_td *cur_td, struct dequeue_state *state) { struct xhci_virt_device *dev = xhci->devs[slot_id]; struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; struct xhci_generic_trb *trb; - struct xhci_ep_ctx *ep_ctx; - dma_addr_t addr; state->new_cycle_state = 0; - xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); state->new_deq_seg = find_trb_seg(cur_td->start_seg, ep_ring->stopped_trb, &state->new_cycle_state); if (!state->new_deq_seg) BUG(); /* Dig out the cycle state saved by the xHC during the stop ep cmd */ - xhci_dbg(xhci, "Finding endpoint context\n"); - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); - state->new_cycle_state = 0x1 & ep_ctx->deq; + state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; state->new_deq_ptr = cur_td->last_trb; - xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); state->new_deq_seg = find_trb_seg(state->new_deq_seg, state->new_deq_ptr, &state->new_cycle_state); @@ -386,12 +367,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); /* Don't update the ring cycle state for the producer (us). 
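
[Editor's note: the find_new_dequeue_state() logic above walks a segmented ring and toggles the cycle state when it crosses a link TRB. A minimal stand-alone model of that walk, with hypothetical types in place of the kernel's xhci structs — a sketch, not the driver's code:]

    #define TRBS_PER_SEG 16
    #define LINK_TOGGLE  (1u << 1)   /* stand-in for the link TRB's toggle-cycle flag */

    struct trb { unsigned control; };

    struct seg {
        struct trb trbs[TRBS_PER_SEG];   /* last entry modelled as the link TRB */
        struct seg *next;
    };

    /* Step one TRB forward; flip *cycle when crossing a toggling link TRB. */
    static void advance(struct seg **seg, struct trb **ptr, int *cycle)
    {
        ++*ptr;
        if (*ptr == &(*seg)->trbs[TRBS_PER_SEG - 1]) {   /* reached the link TRB */
            if ((*ptr)->control & LINK_TOGGLE)
                *cycle ^= 1;
            *seg = (*seg)->next;
            *ptr = (*seg)->trbs;
        }
    }
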
*/ - xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", - state->new_deq_seg); - addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); - xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", - (unsigned long long) addr); - xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); ep_ring->dequeue = state->new_deq_ptr; ep_ring->deq_seg = state->new_deq_seg; } @@ -441,30 +416,6 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index, struct xhci_segment *deq_seg, union xhci_trb *deq_ptr, u32 cycle_state); -void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, - struct xhci_ring *ep_ring, unsigned int slot_id, - unsigned int ep_index, struct xhci_dequeue_state *deq_state) -{ - xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " - "new deq ptr = %p (0x%llx dma), new cycle = %u\n", - deq_state->new_deq_seg, - (unsigned long long)deq_state->new_deq_seg->dma, - deq_state->new_deq_ptr, - (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), - deq_state->new_cycle_state); - queue_set_tr_deq(xhci, slot_id, ep_index, - deq_state->new_deq_seg, - deq_state->new_deq_ptr, - (u32) deq_state->new_cycle_state); - /* Stop the TD queueing code from ringing the doorbell until - * this command completes. The HC won't set the dequeue pointer - * if the ring is running, and ringing the doorbell starts the - * ring running. - */ - ep_ring->state |= SET_DEQ_PENDING; - xhci_ring_cmd_db(xhci); -} - /* * When we get a command completion for a Stop Endpoint Command, we need to * unlink any cancelled TDs from the ring. There are two ways to do that: @@ -485,7 +436,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, struct xhci_td *cur_td = 0; struct xhci_td *last_unlinked_td; - struct xhci_dequeue_state deq_state; + struct dequeue_state deq_state; #ifdef CONFIG_USB_HCD_STAT ktime_t stop_time = ktime_get(); #endif @@ -513,7 +464,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, * move the xHC endpoint ring dequeue pointer past this TD. */ if (cur_td == ep_ring->stopped_td) - xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, + find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, &deq_state); else td_to_noop(xhci, ep_ring, cur_td); @@ -529,8 +480,24 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { - xhci_queue_new_dequeue_state(xhci, ep_ring, - slot_id, ep_index, &deq_state); + xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " + "new deq ptr = %p (0x%llx dma), new cycle = %u\n", + deq_state.new_deq_seg, + (unsigned long long)deq_state.new_deq_seg->dma, + deq_state.new_deq_ptr, + (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr), + deq_state.new_cycle_state); + queue_set_tr_deq(xhci, slot_id, ep_index, + deq_state.new_deq_seg, + deq_state.new_deq_ptr, + (u32) deq_state.new_cycle_state); + /* Stop the TD queueing code from ringing the doorbell until + * this command completes. The HC won't set the dequeue pointer + * if the ring is running, and ringing the doorbell starts the + * ring running. 
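
[Editor's note: the comment above, completed just below, describes a general deferred-doorbell pattern: a flag set when the Set TR Deq Ptr command is queued suppresses doorbell rings until the command's completion handler clears it. Isolated as a sketch with hypothetical names:]

    #define SET_DEQ_PENDING (1 << 0)

    struct ep_ring_state { unsigned state; };

    /* After queueing the Set TR Deq Ptr command: block the doorbell. */
    static void after_queue_set_deq(struct ep_ring_state *ring)
    {
        ring->state |= SET_DEQ_PENDING;
    }

    /* The doorbell helper refuses to ring while the command is in flight... */
    static int may_ring_doorbell(const struct ep_ring_state *ring)
    {
        return !(ring->state & SET_DEQ_PENDING);
    }

    /* ...and the command's completion handler lifts the embargo. */
    static void on_set_deq_completion(struct ep_ring_state *ring)
    {
        ring->state &= ~SET_DEQ_PENDING;
        /* now ring the doorbell to restart the ring */
    }
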
+ */ + ep_ring->state |= SET_DEQ_PENDING; + xhci_ring_cmd_db(xhci); } else { /* Otherwise just ring the doorbell to restart the ring */ ring_ep_doorbell(xhci, slot_id, ep_index); @@ -584,15 +551,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, unsigned int ep_index; struct xhci_ring *ep_ring; struct xhci_virt_device *dev; - struct xhci_ep_ctx *ep_ctx; - struct xhci_slot_ctx *slot_ctx; slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); dev = xhci->devs[slot_id]; ep_ring = dev->ep_rings[ep_index]; - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); - slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { unsigned int ep_state; @@ -606,9 +569,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, case COMP_CTX_STATE: xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " "to incorrect slot or ep state.\n"); - ep_state = ep_ctx->ep_info; + ep_state = dev->out_ctx->ep[ep_index].ep_info; ep_state &= EP_STATE_MASK; - slot_state = slot_ctx->dev_state; + slot_state = dev->out_ctx->slot.dev_state; slot_state = GET_SLOT_STATE(slot_state); xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", slot_state, ep_state); @@ -630,33 +593,16 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, * cancelling URBs, which might not be an error... */ } else { - xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", - ep_ctx->deq); + xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " + "deq[1] = 0x%x.\n", + dev->out_ctx->ep[ep_index].deq[0], + dev->out_ctx->ep[ep_index].deq[1]); } ep_ring->state &= ~SET_DEQ_PENDING; ring_ep_doorbell(xhci, slot_id, ep_index); } -static void handle_reset_ep_completion(struct xhci_hcd *xhci, - struct xhci_event_cmd *event, - union xhci_trb *trb) -{ - int slot_id; - unsigned int ep_index; - - slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); - ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); - /* This command will only fail if the endpoint wasn't halted, - * but we don't care. - */ - xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", - (unsigned int) GET_COMP_CODE(event->status)); - - /* Clear our internal halted state and restart the ring */ - xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED; - ring_ep_doorbell(xhci, slot_id, ep_index); -} static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) @@ -665,7 +611,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, u64 cmd_dma; dma_addr_t cmd_dequeue_dma; - cmd_dma = event->cmd_trb; + cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue); /* Is the command ring deq ptr out of sync with the deq seg ptr? 
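
[Editor's note: with cmd_trb now a two-element u32 array, the completion handler above reassembles the 64-bit DMA address by hand. The idiom in isolation, with plain C types standing in for the kernel's:]

    #include <stdint.h>

    /* Low dword in ptr[0], high dword in ptr[1]; '|' and '+' are
     * interchangeable here because the two halves do not overlap. */
    static inline uint64_t dma_from_dwords(const uint32_t ptr[2])
    {
        return ((uint64_t)ptr[1] << 32) | ptr[0];
    }
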
*/ @@ -707,9 +653,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_TYPE(TRB_CMD_NOOP): ++xhci->noops_handled; break; - case TRB_TYPE(TRB_RESET_EP): - handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); - break; default: /* Skip over unknown commands on the event ring */ xhci->error_bitmask |= 1 << 6; @@ -813,9 +756,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, union xhci_trb *event_trb; struct urb *urb = 0; int status = -EINPROGRESS; - struct xhci_ep_ctx *ep_ctx; - xhci_dbg(xhci, "In %s\n", __func__); xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; if (!xdev) { xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); @@ -824,17 +765,17 @@ static int handle_tx_event(struct xhci_hcd *xhci, /* Endpoint ID is 1 based, our index is zero based */ ep_index = TRB_TO_EP_ID(event->flags) - 1; - xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); ep_ring = xdev->ep_rings[ep_index]; - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); - if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { + if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); return -ENODEV; } - event_dma = event->buffer; + event_dma = event->buffer[0]; + if (event->buffer[1] != 0) + xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n"); + /* This TRB should be in the TD at the head of this ring's TD list */ - xhci_dbg(xhci, "%s - checking for list empty\n", __func__); if (list_empty(&ep_ring->td_list)) { xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(event->flags), ep_index); @@ -844,14 +785,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, urb = NULL; goto cleanup; } - xhci_dbg(xhci, "%s - getting list entry\n", __func__); td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); /* Is this a TRB in the currently executing TD? */ - xhci_dbg(xhci, "%s - looking for TD\n", __func__); event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, td->last_trb, event_dma); - xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg); if (!event_seg) { /* HC is busted, give up! 
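
[Editor's note: just below, the transfer-event handler converts the event's DMA address back into a TRB pointer by indexing into the segment that trb_in_td() located. The arithmetic as a stand-alone sketch, hypothetical types assumed:]

    #include <stdint.h>

    union trb { uint32_t field[4]; };     /* a TRB is 16 bytes */

    struct segment {
        union trb *trbs;                  /* virtual base of the segment */
        uint64_t   dma;                   /* bus address of the same memory */
    };

    /* Offset within the segment, scaled by the TRB size, indexes the array. */
    static union trb *trb_from_dma(const struct segment *seg, uint64_t event_dma)
    {
        return &seg->trbs[(event_dma - seg->dma) / sizeof(union trb)];
    }
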
*/ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); @@ -860,10 +798,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); - xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n", - lower_32_bits(event->buffer)); - xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n", - upper_32_bits(event->buffer)); + xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", + (unsigned int) event->buffer[0]); + xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", + (unsigned int) event->buffer[1]); xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", (unsigned int) event->transfer_len); xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", @@ -885,7 +823,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, break; case COMP_STALL: xhci_warn(xhci, "WARN: Stalled endpoint\n"); - ep_ring->state |= EP_HALTED; status = -EPIPE; break; case COMP_TRB_ERR: @@ -896,10 +833,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_warn(xhci, "WARN: transfer error on endpoint\n"); status = -EPROTO; break; - case COMP_BABBLE: - xhci_warn(xhci, "WARN: babble error on endpoint\n"); - status = -EOVERFLOW; - break; case COMP_DB_ERR: xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); status = -ENOSR; @@ -941,26 +874,15 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (event_trb != ep_ring->dequeue) { /* The event was for the status stage */ if (event_trb == td->last_trb) { - if (td->urb->actual_length != 0) { - /* Don't overwrite a previously set error code */ - if (status == -EINPROGRESS || status == 0) - /* Did we already see a short data stage? */ - status = -EREMOTEIO; - } else { - td->urb->actual_length = - td->urb->transfer_buffer_length; - } + td->urb->actual_length = + td->urb->transfer_buffer_length; } else { /* Maybe the event was for the data stage? */ - if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) { + if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) /* We didn't stop on a link TRB in the middle */ td->urb->actual_length = td->urb->transfer_buffer_length - TRB_LEN(event->transfer_len); - xhci_dbg(xhci, "Waiting for status stage event\n"); - urb = NULL; - goto cleanup; - } } } } else { @@ -1007,20 +929,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, TRB_LEN(event->transfer_len)); td->urb->actual_length = 0; } - /* Don't overwrite a previously set error code */ - if (status == -EINPROGRESS) { - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - status = -EREMOTEIO; - else - status = 0; - } + if (td->urb->transfer_flags & URB_SHORT_NOT_OK) + status = -EREMOTEIO; + else + status = 0; } else { td->urb->actual_length = td->urb->transfer_buffer_length; /* Ignore a short packet completion if the * untransferred length was zero. */ - if (status == -EREMOTEIO) - status = 0; + status = 0; } } else { /* Slow path - walk the list, starting from the dequeue @@ -1047,30 +965,19 @@ static int handle_tx_event(struct xhci_hcd *xhci, TRB_LEN(event->transfer_len); } } + /* The Endpoint Stop Command completion will take care of + * any stopped TDs. A stopped TD may be restarted, so don't update the + * ring dequeue pointer or take this TD off any lists yet. + */ if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || GET_COMP_CODE(event->transfer_len) == COMP_STOP) { - /* The Endpoint Stop Command completion will take care of any - * stopped TDs. 
A stopped TD may be restarted, so don't update - * the ring dequeue pointer or take this TD off any lists yet. - */ ep_ring->stopped_td = td; ep_ring->stopped_trb = event_trb; } else { - if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) { - /* The transfer is completed from the driver's - * perspective, but we need to issue a set dequeue - * command for this stalled endpoint to move the dequeue - * pointer past the TD. We can't do that here because - * the halt condition must be cleared first. - */ - ep_ring->stopped_td = td; - ep_ring->stopped_trb = event_trb; - } else { - /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring, false); + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) inc_deq(xhci, ep_ring, false); - } + inc_deq(xhci, ep_ring, false); /* Clean up the endpoint's TD list */ urb = td->urb; @@ -1080,10 +987,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, list_del(&td->cancelled_td_list); ep_ring->cancels_pending--; } - /* Leave the TD around for the reset endpoint function to use */ - if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) { - kfree(td); - } + kfree(td); urb->hcpriv = NULL; } cleanup: @@ -1093,8 +997,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */ if (urb) { usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); - xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n", - urb, td->urb->actual_length, status); spin_unlock(&xhci->lock); usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); spin_lock(&xhci->lock); @@ -1112,7 +1014,6 @@ void xhci_handle_event(struct xhci_hcd *xhci) int update_ptrs = 1; int ret; - xhci_dbg(xhci, "In %s\n", __func__); if (!xhci->event_ring || !xhci->event_ring->dequeue) { xhci->error_bitmask |= 1 << 1; return; @@ -1125,25 +1026,18 @@ void xhci_handle_event(struct xhci_hcd *xhci) xhci->error_bitmask |= 1 << 2; return; } - xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); /* FIXME: Handle more event types. 
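
[Editor's note: the dispatch that follows keys on the TRB type field packed into flags. A compact model of that decode — mask and type values follow the xHCI event TRB encoding, but this is an illustration, not the driver's switch:]

    #include <stdint.h>

    #define TRB_TYPE_BITMASK (0x3fu << 10)          /* TRB type field, bits 15:10 */
    #define TRB_TYPE(t)      ((uint32_t)(t) << 10)

    enum { TRB_TRANSFER = 32, TRB_COMPLETION = 33, TRB_PORT_STATUS = 34 };

    static void dispatch(uint32_t flags)
    {
        switch (flags & TRB_TYPE_BITMASK) {
        case TRB_TYPE(TRB_COMPLETION):   /* command completion event */
            break;
        case TRB_TYPE(TRB_PORT_STATUS):  /* port status change event */
            break;
        case TRB_TYPE(TRB_TRANSFER):     /* transfer event */
            break;
        default:                         /* unknown type: record in error bitmask */
            break;
        }
    }
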
*/ switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { case TRB_TYPE(TRB_COMPLETION): - xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); handle_cmd_completion(xhci, &event->event_cmd); - xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__); break; case TRB_TYPE(TRB_PORT_STATUS): - xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__); handle_port_status(xhci, event); - xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__); update_ptrs = 0; break; case TRB_TYPE(TRB_TRANSFER): - xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__); ret = handle_tx_event(xhci, &event->trans_event); - xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__); if (ret < 0) xhci->error_bitmask |= 1 << 9; else @@ -1199,13 +1093,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, */ xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); return -ENOENT; + case EP_STATE_HALTED: case EP_STATE_ERROR: - xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); + xhci_warn(xhci, "WARN waiting for halt or error on ep " + "to be cleared\n"); /* FIXME event handling code for error needs to clear it */ /* XXX not sure if this should be -ENOENT or not */ return -EINVAL; - case EP_STATE_HALTED: - xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); case EP_STATE_STOPPED: case EP_STATE_RUNNING: break; @@ -1234,9 +1128,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, gfp_t mem_flags) { int ret; - struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + ret = prepare_ring(xhci, xdev->ep_rings[ep_index], - ep_ctx->ep_info & EP_STATE_MASK, + xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, num_trbs, mem_flags); if (ret) return ret; @@ -1391,7 +1285,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Queue the first TRB, even if it's zero-length */ do { u32 field = 0; - u32 length_field = 0; /* Don't change the cycle bit of the first TRB until later */ if (first_trb) @@ -1421,13 +1314,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); } - length_field = TRB_LEN(trb_buff_len) | - TD_REMAINDER(urb->transfer_buffer_length - running_total) | - TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, false, - lower_32_bits(addr), - upper_32_bits(addr), - length_field, + (u32) addr, + (u32) ((u64) addr >> 32), + TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), /* We always want to know if the TRB was short, * or we won't get an event when it completes. * (Unless we use event data TRBs, which are a @@ -1475,7 +1365,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct xhci_generic_trb *start_trb; bool first_trb; int start_cycle; - u32 field, length_field; + u32 field; int running_total, trb_buff_len, ret; u64 addr; @@ -1553,13 +1443,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td->last_trb = ep_ring->enqueue; field |= TRB_IOC; } - length_field = TRB_LEN(trb_buff_len) | - TD_REMAINDER(urb->transfer_buffer_length - running_total) | - TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, false, - lower_32_bits(addr), - upper_32_bits(addr), - length_field, + (u32) addr, + (u32) ((u64) addr >> 32), + TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), /* We always want to know if the TRB was short, * or we won't get an event when it completes. 
* (Unless we use event data TRBs, which are a @@ -1591,7 +1478,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct usb_ctrlrequest *setup; struct xhci_generic_trb *start_trb; int start_cycle; - u32 field, length_field; + u32 field; struct xhci_td *td; ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; @@ -1641,16 +1528,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* If there's data, queue data TRBs */ field = 0; - length_field = TRB_LEN(urb->transfer_buffer_length) | - TD_REMAINDER(urb->transfer_buffer_length) | - TRB_INTR_TARGET(0); if (urb->transfer_buffer_length > 0) { if (setup->bRequestType & USB_DIR_IN) field |= TRB_DIR_IN; queue_trb(xhci, ep_ring, false, lower_32_bits(urb->transfer_dma), upper_32_bits(urb->transfer_dma), - length_field, + TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), /* Event on short tx */ field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); } @@ -1719,8 +1603,7 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id) { - return queue_command(xhci, lower_32_bits(in_ctx_ptr), - upper_32_bits(in_ctx_ptr), 0, + return queue_command(xhci, in_ctx_ptr, 0, 0, TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); } @@ -1728,8 +1611,7 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id) { - return queue_command(xhci, lower_32_bits(in_ctx_ptr), - upper_32_bits(in_ctx_ptr), 0, + return queue_command(xhci, in_ctx_ptr, 0, 0, TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); } @@ -1757,23 +1639,10 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, u32 type = TRB_TYPE(TRB_SET_DEQ); addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); - if (addr == 0) { + if (addr == 0) xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", deq_seg, deq_ptr); - return 0; - } - return queue_command(xhci, lower_32_bits(addr) | cycle_state, - upper_32_bits(addr), 0, + return queue_command(xhci, (u32) addr | cycle_state, 0, 0, trb_slot_id | trb_ep_index | type); } - -int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index) -{ - u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); - u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); - u32 type = TRB_TYPE(TRB_RESET_EP); - - return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type); -} diff --git a/trunk/drivers/usb/host/xhci.h b/trunk/drivers/usb/host/xhci.h index d31d32206ba3..8936eeb5588b 100644 --- a/trunk/drivers/usb/host/xhci.h +++ b/trunk/drivers/usb/host/xhci.h @@ -25,7 +25,6 @@ #include #include -#include #include "../core/hcd.h" /* Code sharing between pci-quirks and xhci hcd */ @@ -43,6 +42,14 @@ * xHCI register interface. * This corresponds to the eXtensible Host Controller Interface (xHCI) * Revision 0.95 specification + * + * Registers should always be accessed with double word or quad word accesses. + * + * Some xHCI implementations may support 64-bit address pointers. Registers + * with 64-bit address pointers should be written to with dword accesses by + * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. + * xHCI implementations that do not support 64-bit address pointers will ignore + * the high dword, and write order is irrelevant. 
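
[Editor's note: the convention just stated — 64-bit pointer registers become u32[2], written low dword first — is the one this patch applies throughout. A user-space model of the access pattern, with volatile stores standing in for writel():]

    #include <stdint.h>

    /* Split write: low dword first, so a controller that ignores the
     * high dword still latches a consistent pointer. */
    static inline void write64_split(volatile uint32_t reg[2], uint64_t val)
    {
        reg[0] = (uint32_t)val;           /* low dword first  */
        reg[1] = (uint32_t)(val >> 32);   /* high dword second */
    }
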
*/ /** @@ -89,7 +96,6 @@ struct xhci_cap_regs { #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ -#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) /* HCSPARAMS3 - hcs_params3 - bitmasks */ /* bits 0:7, Max U1 to U0 latency for the roothub ports */ @@ -160,10 +166,10 @@ struct xhci_op_regs { u32 reserved1; u32 reserved2; u32 dev_notification; - u64 cmd_ring; + u32 cmd_ring[2]; /* rsvd: offset 0x20-2F */ u32 reserved3[4]; - u64 dcbaa_ptr; + u32 dcbaa_ptr[2]; u32 config_reg; /* rsvd: offset 0x3C-3FF */ u32 reserved4[241]; @@ -248,7 +254,7 @@ struct xhci_op_regs { #define CMD_RING_RUNNING (1 << 3) /* bits 4:5 reserved and should be preserved */ /* Command Ring pointer - bit mask for the lower 32 bits. */ -#define CMD_RING_RSVD_BITS (0x3f) +#define CMD_RING_ADDR_MASK (0xffffffc0) /* CONFIG - Configure Register - config_reg bitmasks */ /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ @@ -376,8 +382,8 @@ struct xhci_intr_reg { u32 irq_control; u32 erst_size; u32 rsvd; - u64 erst_base; - u64 erst_dequeue; + u32 erst_base[2]; + u32 erst_dequeue[2]; }; /* irq_pending bitmasks */ @@ -446,27 +452,6 @@ struct xhci_doorbell_array { #define EPI_TO_DB(p) (((p) + 1) & 0xff) -/** - * struct xhci_container_ctx - * @type: Type of context. Used to calculated offsets to contained contexts. - * @size: Size of the context data - * @bytes: The raw context data given to HW - * @dma: dma address of the bytes - * - * Represents either a Device or Input context. Holds a pointer to the raw - * memory used for the context (bytes) and dma address of it (dma). - */ -struct xhci_container_ctx { - unsigned type; -#define XHCI_CTX_TYPE_DEVICE 0x1 -#define XHCI_CTX_TYPE_INPUT 0x2 - - int size; - - u8 *bytes; - dma_addr_t dma; -}; - /** * struct xhci_slot_ctx * @dev_info: Route string, device speed, hub info, and last valid endpoint @@ -553,7 +538,7 @@ struct xhci_slot_ctx { struct xhci_ep_ctx { u32 ep_info; u32 ep_info2; - u64 deq; + u32 deq[2]; u32 tx_info; /* offset 0x14 - 0x1f reserved for HC internal use */ u32 reserved[3]; @@ -604,16 +589,18 @@ struct xhci_ep_ctx { /** - * struct xhci_input_control_context - * Input control context; see section 6.2.5. + * struct xhci_device_control + * Input/Output context; see section 6.2.5. * * @drop_context: set the bit of the endpoint context you want to disable * @add_context: set the bit of the endpoint context you want to enable */ -struct xhci_input_control_ctx { +struct xhci_device_control { u32 drop_flags; u32 add_flags; - u32 rsvd2[6]; + u32 rsvd[6]; + struct xhci_slot_ctx slot; + struct xhci_ep_ctx ep[31]; }; /* drop context bitmasks */ @@ -621,6 +608,7 @@ struct xhci_input_control_ctx { /* add context bitmasks */ #define ADD_EP(x) (0x1 << x) + struct xhci_virt_device { /* * Commands to the hardware are passed an "input context" that @@ -630,10 +618,11 @@ struct xhci_virt_device { * track of input and output contexts separately because * these commands might fail and we don't trust the hardware. 
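
[Editor's note: struct xhci_device_control above bundles the control dwords, the slot context, and 31 endpoint contexts into one DMA-able block. A sketch of how an endpoint gets marked for addition in an input context — ADD_EP is from the header above; DROP_EP is shown as a hypothetical mirror, not a macro the header defines:]

    #include <stdint.h>

    #define ADD_EP(x)  (0x1u << (x))
    #define DROP_EP(x) (0x1u << (x))   /* hypothetical counterpart of ADD_EP */

    struct device_control_model {      /* reduced sketch of the layout above */
        uint32_t drop_flags;
        uint32_t add_flags;
        uint32_t rsvd[6];
        /* slot context and 31 endpoint contexts follow in the real struct */
    };

    /* Ask the controller to start using endpoint ep_index. */
    static void enable_ep(struct device_control_model *in_ctx, unsigned ep_index)
    {
        in_ctx->add_flags  |= ADD_EP(ep_index);
        in_ctx->drop_flags &= ~DROP_EP(ep_index);
    }
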
*/ - struct xhci_container_ctx *out_ctx; + struct xhci_device_control *out_ctx; + dma_addr_t out_ctx_dma; /* Used for addressing devices and configuration changes */ - struct xhci_container_ctx *in_ctx; - + struct xhci_device_control *in_ctx; + dma_addr_t in_ctx_dma; /* FIXME when stream support is added */ struct xhci_ring *ep_rings[31]; /* Temporary storage in case the configure endpoint command fails and we @@ -652,7 +641,7 @@ struct xhci_virt_device { */ struct xhci_device_context_array { /* 64-bit device addresses; we only write 32-bit addresses */ - u64 dev_context_ptrs[MAX_HC_SLOTS]; + u32 dev_context_ptrs[2*MAX_HC_SLOTS]; /* private xHCD pointers */ dma_addr_t dma; }; @@ -665,7 +654,7 @@ struct xhci_device_context_array { struct xhci_stream_ctx { /* 64-bit stream ring address, cycle state, and stream type */ - u64 stream_ring; + u32 stream_ring[2]; /* offset 0x14 - 0x1f reserved for HC internal use */ u32 reserved[2]; }; @@ -673,7 +662,7 @@ struct xhci_stream_ctx { struct xhci_transfer_event { /* 64-bit buffer address, or immediate data */ - u64 buffer; + u32 buffer[2]; u32 transfer_len; /* This field is interpreted differently based on the type of TRB */ u32 flags; @@ -755,7 +744,7 @@ struct xhci_transfer_event { struct xhci_link_trb { /* 64-bit segment pointer*/ - u64 segment_ptr; + u32 segment_ptr[2]; u32 intr_target; u32 control; }; @@ -766,7 +755,7 @@ struct xhci_link_trb { /* Command completion event TRB */ struct xhci_event_cmd { /* Pointer to command TRB, or the value passed by the event data trb */ - u64 cmd_trb; + u32 cmd_trb[2]; u32 status; u32 flags; }; @@ -859,8 +848,8 @@ union xhci_trb { #define TRB_CONFIG_EP 12 /* Evaluate Context Command */ #define TRB_EVAL_CONTEXT 13 -/* Reset Endpoint Command */ -#define TRB_RESET_EP 14 +/* Reset Transfer Ring Command */ +#define TRB_RESET_RING 14 /* Stop Transfer Ring Command */ #define TRB_STOP_RING 15 /* Set Transfer Ring Dequeue Pointer Command */ @@ -940,7 +929,6 @@ struct xhci_ring { unsigned int cancels_pending; unsigned int state; #define SET_DEQ_PENDING (1 << 0) -#define EP_HALTED (1 << 1) /* The TRB that was last reported in a stopped endpoint ring */ union xhci_trb *stopped_trb; struct xhci_td *stopped_td; @@ -952,15 +940,9 @@ struct xhci_ring { u32 cycle_state; }; -struct xhci_dequeue_state { - struct xhci_segment *new_deq_seg; - union xhci_trb *new_deq_ptr; - int new_cycle_state; -}; - struct xhci_erst_entry { /* 64-bit event ring segment address */ - u64 seg_addr; + u32 seg_addr[2]; u32 seg_size; /* Set to zero */ u32 rsvd; @@ -975,13 +957,6 @@ struct xhci_erst { unsigned int erst_size; }; -struct xhci_scratchpad { - u64 *sp_array; - dma_addr_t sp_dma; - void **sp_buffers; - dma_addr_t *sp_dma_buffers; -}; - /* * Each segment table entry is 4*32bits long. 
1K seems like an ok size: * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table, @@ -1036,9 +1011,6 @@ struct xhci_hcd { struct xhci_ring *cmd_ring; struct xhci_ring *event_ring; struct xhci_erst erst; - /* Scratchpad */ - struct xhci_scratchpad *scratchpad; - /* slot enabling and address device helpers */ struct completion addr_dev; int slot_id; @@ -1099,43 +1071,13 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, static inline void xhci_writel(struct xhci_hcd *xhci, const unsigned int val, __u32 __iomem *regs) { - xhci_dbg(xhci, - "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", - regs, val); + if (!in_interrupt()) + xhci_dbg(xhci, + "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", + regs, val); writel(val, regs); } -/* - * Registers should always be accessed with double word or quad word accesses. - * - * Some xHCI implementations may support 64-bit address pointers. Registers - * with 64-bit address pointers should be written to with dword accesses by - * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. - * xHCI implementations that do not support 64-bit address pointers will ignore - * the high dword, and write order is irrelevant. - */ -static inline u64 xhci_read_64(const struct xhci_hcd *xhci, - __u64 __iomem *regs) -{ - __u32 __iomem *ptr = (__u32 __iomem *) regs; - u64 val_lo = readl(ptr); - u64 val_hi = readl(ptr + 1); - return val_lo + (val_hi << 32); -} -static inline void xhci_write_64(struct xhci_hcd *xhci, - const u64 val, __u64 __iomem *regs) -{ - __u32 __iomem *ptr = (__u32 __iomem *) regs; - u32 val_lo = lower_32_bits(val); - u32 val_hi = upper_32_bits(val); - - xhci_dbg(xhci, - "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n", - regs, (long unsigned int) val); - writel(val_lo, ptr); - writel(val_hi, ptr + 1); -} - /* xHCI debugging */ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); void xhci_print_registers(struct xhci_hcd *xhci); @@ -1148,7 +1090,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring); void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); -void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); +void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); /* xHCI memory managment */ void xhci_mem_cleanup(struct xhci_hcd *xhci); @@ -1186,7 +1128,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); -void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); @@ -1207,23 +1148,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index); int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); -int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index); -void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, - unsigned 
int slot_id, unsigned int ep_index, - struct xhci_td *cur_td, struct xhci_dequeue_state *state); -void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, - struct xhci_ring *ep_ring, unsigned int slot_id, - unsigned int ep_index, struct xhci_dequeue_state *deq_state); /* xHCI roothub code */ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength); int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); -/* xHCI contexts */ -struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); -struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); -struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); - #endif /* __LINUX_XHCI_HCD_H */ diff --git a/trunk/drivers/usb/misc/Kconfig b/trunk/drivers/usb/misc/Kconfig index abe3aa67ed00..a68d91a11bee 100644 --- a/trunk/drivers/usb/misc/Kconfig +++ b/trunk/drivers/usb/misc/Kconfig @@ -220,7 +220,7 @@ config USB_IOWARRIOR config USB_TEST tristate "USB testing driver" - depends on USB + depends on USB && USB_DEVICEFS help This driver is for testing host controller software. It is used with specialized device firmware for regression and stress testing, diff --git a/trunk/drivers/usb/musb/musb_core.c b/trunk/drivers/usb/musb/musb_core.c index c7c1ca0494cd..554a414f65d1 100644 --- a/trunk/drivers/usb/musb/musb_core.c +++ b/trunk/drivers/usb/musb/musb_core.c @@ -1326,6 +1326,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) int i; /* log core options (read using indexed model) */ + musb_ep_select(mbase, 0); reg = musb_read_configdata(mbase); strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); @@ -1989,7 +1990,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (status < 0) goto fail2; -#ifdef CONFIG_USB_MUSB_OTG +#ifdef CONFIG_USB_OTG setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); #endif diff --git a/trunk/drivers/usb/musb/musb_gadget_ep0.c b/trunk/drivers/usb/musb/musb_gadget_ep0.c index 7a6778675ad3..40ed50ecedff 100644 --- a/trunk/drivers/usb/musb/musb_gadget_ep0.c +++ b/trunk/drivers/usb/musb/musb_gadget_ep0.c @@ -407,7 +407,7 @@ __acquires(musb->lock) csr |= MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG - | MUSB_RXCSR_P_WZC_BITS; + | MUSB_TXCSR_P_WZC_BITS; musb_writew(regs, MUSB_RXCSR, csr); } diff --git a/trunk/drivers/usb/musb/musb_regs.h b/trunk/drivers/usb/musb/musb_regs.h index fbfd3fd9ce1f..de3b2f18db44 100644 --- a/trunk/drivers/usb/musb/musb_regs.h +++ b/trunk/drivers/usb/musb/musb_regs.h @@ -323,7 +323,6 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) static inline u8 musb_read_configdata(void __iomem *mbase) { - musb_writeb(mbase, MUSB_INDEX, 0); return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); } diff --git a/trunk/drivers/usb/serial/cp210x.c b/trunk/drivers/usb/serial/cp210x.c index 985cbcf48bda..e9a40b820fd4 100644 --- a/trunk/drivers/usb/serial/cp210x.c +++ b/trunk/drivers/usb/serial/cp210x.c @@ -80,7 +80,6 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ - { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 
0x814B) }, /* West Mountain Radio RIGtalk */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ @@ -97,9 +96,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ - { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ - { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index b574878c78b2..60c64cc5be2a 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -698,7 +698,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, - { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; diff --git a/trunk/drivers/usb/serial/ftdi_sio.h b/trunk/drivers/usb/serial/ftdi_sio.h index 24dbd99e87d7..c9fbd7415092 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.h +++ b/trunk/drivers/usb/serial/ftdi_sio.h @@ -946,13 +946,6 @@ #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ -/* - * GN Otometrics (http://www.otometrics.com) - * Submitted by Ville Sundberg. - */ -#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */ -#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ - /* * BmRequestType: 1100 0000b * bRequest: FTDI_E2_READ diff --git a/trunk/drivers/usb/serial/mos7840.c b/trunk/drivers/usb/serial/mos7840.c index 270009afdf77..c31940a307f8 100644 --- a/trunk/drivers/usb/serial/mos7840.c +++ b/trunk/drivers/usb/serial/mos7840.c @@ -124,13 +124,10 @@ #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 -/* This driver also supports - * ATEN UC2324 device using Moschip MCS7840 - * ATEN UC2322 device using Moschip MCS7820 - */ +/* This driver also supports the ATEN UC2324 device since it is mos7840 based + * - if I knew the device id it would also support the ATEN UC2322 */ #define USB_VENDOR_ID_ATENINTL 0x0557 #define ATENINTL_DEVICE_ID_UC2324 0x2011 -#define ATENINTL_DEVICE_ID_UC2322 0x7820 /* Interrupt Routine Defines */ @@ -180,7 +177,6 @@ static struct usb_device_id moschip_port_id_table[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, - {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, {} /* terminating entry */ }; @@ -190,7 +186,6 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, - {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, {} /* terminating entry */ }; diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c index c784ddbe7b61..98262dd552bb 100644 --- a/trunk/drivers/usb/serial/option.c +++ b/trunk/drivers/usb/serial/option.c @@ 
-66,10 +66,8 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file); static int option_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); static int option_send_setup(struct usb_serial_port *port); -#ifdef CONFIG_PM static int option_suspend(struct usb_serial *serial, pm_message_t message); static int option_resume(struct usb_serial *serial); -#endif /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 @@ -207,7 +205,6 @@ static int option_resume(struct usb_serial *serial); #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 #define NOVATELWIRELESS_PRODUCT_U727 0x5010 -#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 @@ -262,6 +259,11 @@ static int option_resume(struct usb_serial *serial); #define AXESSTEL_VENDOR_ID 0x1726 #define AXESSTEL_PRODUCT_MV110H 0x1000 +#define ONDA_VENDOR_ID 0x19d2 +#define ONDA_PRODUCT_MSA501HS 0x0001 +#define ONDA_PRODUCT_ET502HS 0x0002 +#define ONDA_PRODUCT_MT503HS 0x2000 + #define BANDRICH_VENDOR_ID 0x1A8D #define BANDRICH_PRODUCT_C100_1 0x1002 #define BANDRICH_PRODUCT_C100_2 0x1003 @@ -299,7 +301,6 @@ static int option_resume(struct usb_serial *serial); #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 #define ZTE_PRODUCT_CDMA_TECH 0xfffe -#define ZTE_PRODUCT_AC8710 0xfff1 #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -321,11 +322,6 @@ static int option_resume(struct usb_serial *serial); #define ALINK_VENDOR_ID 0x1e0e #define ALINK_PRODUCT_3GU 0x9200 -/* ALCATEL PRODUCTS */ -#define ALCATEL_VENDOR_ID 0x1bbb -#define ALCATEL_PRODUCT_X060S 0x0000 - - static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -442,7 +438,6 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ @@ -479,6 +474,42 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, + { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, + { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0003) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0004) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0005) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0006) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0007) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0008) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0009) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x000a) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x000b) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x000c) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x000d) }, + { 
USB_DEVICE(ONDA_VENDOR_ID, 0x000e) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x000f) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0010) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0011) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0012) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0013) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0014) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0015) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0016) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0017) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0018) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0019) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0020) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0021) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0022) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0023) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0024) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0025) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0026) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0027) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0028) }, + { USB_DEVICE(ONDA_VENDOR_ID, 0x0029) }, + { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) }, { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, @@ -503,75 +534,10 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 
0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, + { USB_DEVICE(ZTE_VENDOR_ID, 
ZTE_PRODUCT_CDMA_TECH) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, @@ -581,7 +547,6 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, - { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -590,10 +555,8 @@ static struct usb_driver option_driver = { .name = "option", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, -#ifdef CONFIG_PM .suspend = usb_serial_suspend, .resume = usb_serial_resume, -#endif .id_table = option_ids, .no_dynamic_id = 1, }; @@ -625,10 +588,8 @@ static struct usb_serial_driver option_1port_device = { .disconnect = option_disconnect, .release = option_release, .read_int_callback = option_instat_callback, -#ifdef CONFIG_PM .suspend = option_suspend, .resume = option_resume, -#endif }; static int debug; @@ -870,6 +831,7 @@ static void option_instat_callback(struct urb *urb) int status = urb->status; struct usb_serial_port *port = urb->context; struct option_port_private *portdata = usb_get_serial_port_data(port); + struct usb_serial *serial = port->serial; dbg("%s", __func__); dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); @@ -965,6 +927,7 @@ static int option_open(struct tty_struct *tty, struct usb_serial_port *port, struct file *filp) { struct option_port_private *portdata; + struct usb_serial *serial = port->serial; int i, err; struct urb *urb; @@ -1224,7 +1187,6 @@ static void option_release(struct usb_serial *serial) } } -#ifdef CONFIG_PM static int option_suspend(struct usb_serial *serial, pm_message_t message) { dbg("%s entered", __func__); @@ -1283,7 +1245,6 @@ static int option_resume(struct usb_serial *serial) } return 0; } -#endif MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/trunk/drivers/usb/storage/transport.c b/trunk/drivers/usb/storage/transport.c index e20dc525d177..fcb320217218 100644 --- a/trunk/drivers/usb/storage/transport.c +++ b/trunk/drivers/usb/storage/transport.c @@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us) US_BULK_GET_MAX_LUN, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0, us->ifnum, us->iobuf, 1, 10*HZ); + 0, us->ifnum, us->iobuf, 1, HZ); US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", result, us->iobuf[0]); diff --git a/trunk/fs/btrfs/async-thread.c b/trunk/fs/btrfs/async-thread.c index 019e8af449ab..6e4f6c50a120 100644 --- a/trunk/fs/btrfs/async-thread.c +++ b/trunk/fs/btrfs/async-thread.c @@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work) * list */ if (worker->idle) { - spin_lock(&worker->workers->lock); + spin_lock_irqsave(&worker->workers->lock, flags); worker->idle = 0; list_move_tail(&worker->worker_list, &worker->workers->worker_list); - spin_unlock(&worker->workers->lock); + spin_unlock_irqrestore(&worker->workers->lock, flags); } if (!worker->working) { wake = 1; diff --git a/trunk/fs/btrfs/ctree.c b/trunk/fs/btrfs/ctree.c index 3fdcc0512d3a..60a45f3a4e91 100644 --- a/trunk/fs/btrfs/ctree.c +++ b/trunk/fs/btrfs/ctree.c @@ -557,7 +557,19 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) btrfs_disk_key_to_cpu(&k1, disk); - return 
btrfs_comp_cpu_keys(&k1, k2); + if (k1.objectid > k2->objectid) + return 1; + if (k1.objectid < k2->objectid) + return -1; + if (k1.type > k2->type) + return 1; + if (k1.type < k2->type) + return -1; + if (k1.offset > k2->offset) + return 1; + if (k1.offset < k2->offset) + return -1; + return 0; } /* @@ -1040,6 +1052,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, BTRFS_NODEPTRS_PER_BLOCK(root) / 4) return 0; + if (btrfs_header_nritems(mid) > 2) + return 0; + if (btrfs_header_nritems(mid) < 2) err_on_enospc = 1; @@ -1686,7 +1701,6 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root struct extent_buffer *b; int slot; int ret; - int err; int level; int lowest_unlock = 1; u8 lowest_level = 0; @@ -1723,6 +1737,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root p->locks[level] = 1; if (cow) { + int wret; + /* * if we don't really need to cow this block * then we don't want to set the path blocking, @@ -1733,12 +1749,12 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_set_path_blocking(p); - err = btrfs_cow_block(trans, root, b, - p->nodes[level + 1], - p->slots[level + 1], &b); - if (err) { + wret = btrfs_cow_block(trans, root, b, + p->nodes[level + 1], + p->slots[level + 1], &b); + if (wret) { free_extent_buffer(b); - ret = err; + ret = wret; goto done; } } @@ -1777,45 +1793,41 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root ret = bin_search(b, key, level, &slot); if (level != 0) { - int dec = 0; - if (ret && slot > 0) { - dec = 1; + if (ret && slot > 0) slot -= 1; - } p->slots[level] = slot; - err = setup_nodes_for_search(trans, root, p, b, level, + ret = setup_nodes_for_search(trans, root, p, b, level, ins_len); - if (err == -EAGAIN) + if (ret == -EAGAIN) goto again; - if (err) { - ret = err; + else if (ret) goto done; - } b = p->nodes[level]; slot = p->slots[level]; unlock_up(p, level, lowest_unlock); + /* this is only true while dropping a snapshot */ if (level == lowest_level) { - if (dec) - p->slots[level]++; + ret = 0; goto done; } - err = read_block_for_search(trans, root, p, + ret = read_block_for_search(trans, root, p, &b, level, slot, key); - if (err == -EAGAIN) + if (ret == -EAGAIN) goto again; - if (err) { - ret = err; + + if (ret == -EIO) goto done; - } if (!p->skip_locking) { + int lret; + btrfs_clear_path_blocking(p, NULL); - err = btrfs_try_spin_lock(b); + lret = btrfs_try_spin_lock(b); - if (!err) { + if (!lret) { btrfs_set_path_blocking(p); btrfs_tree_lock(b); btrfs_clear_path_blocking(p, b); @@ -1825,14 +1837,16 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root p->slots[level] = slot; if (ins_len > 0 && btrfs_leaf_free_space(root, b) < ins_len) { + int sret; + btrfs_set_path_blocking(p); - err = split_leaf(trans, root, key, - p, ins_len, ret == 0); + sret = split_leaf(trans, root, key, + p, ins_len, ret == 0); btrfs_clear_path_blocking(p, NULL); - BUG_ON(err > 0); - if (err) { - ret = err; + BUG_ON(sret > 0); + if (sret) { + ret = sret; goto done; } } @@ -3793,7 +3807,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, } /* delete the leaf if it is mostly empty */ - if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { + if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) { /* push_leaf_left fixes the path. 
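
For reference, the comparison that comp_keys open-codes above is a plain lexicographic ordering over a key's (objectid, type, offset) triple, highest-priority field first. A minimal self-contained sketch of that ordering, using invented demo_* names rather than the kernel's structs:

#include <stdint.h>

struct demo_key {
        uint64_t objectid;
        uint8_t type;
        uint64_t offset;
};

/* lexicographic compare over (objectid, type, offset); returns -1/0/1 */
static int demo_comp_keys(const struct demo_key *k1, const struct demo_key *k2)
{
        if (k1->objectid != k2->objectid)
                return k1->objectid > k2->objectid ? 1 : -1;
        if (k1->type != k2->type)
                return k1->type > k2->type ? 1 : -1;
        if (k1->offset != k2->offset)
                return k1->offset > k2->offset ? 1 : -1;
        return 0;
}
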
* make sure the path still points to our leaf * for possible call to del_ptr below @@ -4028,9 +4042,10 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, * calling this function. */ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *key, int level, + struct btrfs_key *key, int lowest_level, int cache_only, u64 min_trans) { + int level = lowest_level; int slot; struct extent_buffer *c; @@ -4043,40 +4058,11 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, c = path->nodes[level]; next: if (slot >= btrfs_header_nritems(c)) { - int ret; - int orig_lowest; - struct btrfs_key cur_key; - if (level + 1 >= BTRFS_MAX_LEVEL || - !path->nodes[level + 1]) + level++; + if (level == BTRFS_MAX_LEVEL) return 1; - - if (path->locks[level + 1]) { - level++; - continue; - } - - slot = btrfs_header_nritems(c) - 1; - if (level == 0) - btrfs_item_key_to_cpu(c, &cur_key, slot); - else - btrfs_node_key_to_cpu(c, &cur_key, slot); - - orig_lowest = path->lowest_level; - btrfs_release_path(root, path); - path->lowest_level = level; - ret = btrfs_search_slot(NULL, root, &cur_key, path, - 0, 0); - path->lowest_level = orig_lowest; - if (ret < 0) - return ret; - - c = path->nodes[level]; - slot = path->slots[level]; - if (ret == 0) - slot++; - goto next; + continue; } - if (level == 0) btrfs_item_key_to_cpu(c, key, slot); else { @@ -4160,8 +4146,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) * advance the path if there are now more items available. */ if (nritems > 0 && path->slots[0] < nritems - 1) { - if (ret == 0) - path->slots[0]++; + path->slots[0]++; ret = 0; goto done; } @@ -4293,10 +4278,10 @@ int btrfs_previous_item(struct btrfs_root *root, path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid < min_objectid) - break; if (found_key.type == type) return 0; + if (found_key.objectid < min_objectid) + break; if (found_key.objectid == min_objectid && found_key.type < type) break; diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h index 215ef8cae823..98a873838717 100644 --- a/trunk/fs/btrfs/ctree.h +++ b/trunk/fs/btrfs/ctree.h @@ -481,7 +481,7 @@ struct btrfs_shared_data_ref { struct btrfs_extent_inline_ref { u8 type; - __le64 offset; + u64 offset; } __attribute__ ((__packed__)); /* old style backrefs item */ @@ -689,7 +689,6 @@ struct btrfs_space_info { struct list_head block_groups; spinlock_t lock; struct rw_semaphore groups_sem; - atomic_t caching_threads; }; /* @@ -708,9 +707,6 @@ struct btrfs_free_cluster { /* first extent starting offset */ u64 window_start; - /* if this cluster simply points at a bitmap in the block group */ - bool points_to_bitmap; - struct btrfs_block_group_cache *block_group; /* * when a cluster is allocated from a block group, we put the @@ -720,37 +716,24 @@ struct btrfs_free_cluster { struct list_head block_group_list; }; -enum btrfs_caching_type { - BTRFS_CACHE_NO = 0, - BTRFS_CACHE_STARTED = 1, - BTRFS_CACHE_FINISHED = 2, -}; - struct btrfs_block_group_cache { struct btrfs_key key; struct btrfs_block_group_item item; - struct btrfs_fs_info *fs_info; spinlock_t lock; + struct mutex cache_mutex; u64 pinned; u64 reserved; u64 flags; - u64 sectorsize; - int extents_thresh; - int free_extents; - int total_bitmaps; + int cached; int ro; int dirty; - /* cache tracking stuff */ - wait_queue_head_t caching_q; - int cached; - struct btrfs_space_info *space_info; /* free space cache stuff */ spinlock_t tree_lock; + 
struct rb_root free_space_bytes; struct rb_root free_space_offset; - u64 free_space; /* block group cache stuff */ struct rb_node cache_node; @@ -959,9 +942,6 @@ struct btrfs_root { /* the node lock is held while changing the node pointer */ spinlock_t node_lock; - /* taken when updating the commit root */ - struct rw_semaphore commit_root_sem; - struct extent_buffer *commit_root; struct btrfs_root *log_root; struct btrfs_root *reloc_root; @@ -2008,7 +1988,6 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, u64 bytes); void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, u64 bytes); -void btrfs_free_pinned_extents(struct btrfs_fs_info *info); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index 7dcaa8138864..d28d29c95f7c 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -909,7 +909,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, spin_lock_init(&root->inode_lock); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); - init_rwsem(&root->commit_root_sem); init_waitqueue_head(&root->log_writer_wait); init_waitqueue_head(&root->log_commit_wait[0]); init_waitqueue_head(&root->log_commit_wait[1]); @@ -1800,11 +1799,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_super_chunk_root(disk_super), blocksize, generation); BUG_ON(!chunk_root->node); - if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { - printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", - sb->s_id); - goto fail_chunk_root; - } btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); chunk_root->commit_root = btrfs_root_node(chunk_root); @@ -1832,11 +1826,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, blocksize, generation); if (!tree_root->node) goto fail_chunk_root; - if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { - printk(KERN_WARNING "btrfs: failed to read tree root on %s\n", - sb->s_id); - goto fail_tree_root; - } btrfs_set_root_node(&tree_root->root_item, tree_root->node); tree_root->commit_root = btrfs_root_node(tree_root); @@ -2333,9 +2322,6 @@ int close_ctree(struct btrfs_root *root) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } - fs_info->closing = 2; - smp_mb(); - if (fs_info->delalloc_bytes) { printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", (unsigned long long)fs_info->delalloc_bytes); @@ -2357,7 +2343,6 @@ int close_ctree(struct btrfs_root *root) free_extent_buffer(root->fs_info->csum_root->commit_root); btrfs_free_block_groups(root->fs_info); - btrfs_free_pinned_extents(root->fs_info); del_fs_roots(fs_info); diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index fadf69a2764b..a5aca3997d42 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -21,7 +21,6 @@ #include <linux/blkdev.h> #include <linux/sort.h> #include <linux/rcupdate.h> -#include <linux/kthread.h> #include "compat.h" #include "hash.h" #include "ctree.h" @@ -62,13 +61,6 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, u64 alloc_bytes, u64 flags, int force); -static noinline int -block_group_cache_done(struct btrfs_block_group_cache *cache) -{ - smp_mb(); - return cache->cached == BTRFS_CACHE_FINISHED; -} - static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) { return (cache->flags & bits) == bits; } @@ -153,71 +145,21 @@ block_group_cache_tree_search(struct btrfs_fs_info
*info, u64 bytenr, return ret; } -/* - * We always set EXTENT_LOCKED for the super mirror extents so we don't - * overwrite them, so those bits need to be unset. Also, if we are unmounting - * with pinned extents still sitting there because we had a block group caching, - * we need to clear those now, since we are done. - */ -void btrfs_free_pinned_extents(struct btrfs_fs_info *info) -{ - u64 start, end, last = 0; - int ret; - - while (1) { - ret = find_first_extent_bit(&info->pinned_extents, last, - &start, &end, - EXTENT_LOCKED|EXTENT_DIRTY); - if (ret) - break; - - clear_extent_bits(&info->pinned_extents, start, end, - EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS); - last = end+1; - } -} - -static int remove_sb_from_cache(struct btrfs_root *root, - struct btrfs_block_group_cache *cache) -{ - struct btrfs_fs_info *fs_info = root->fs_info; - u64 bytenr; - u64 *logical; - int stripe_len; - int i, nr, ret; - - for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { - bytenr = btrfs_sb_offset(i); - ret = btrfs_rmap_block(&root->fs_info->mapping_tree, - cache->key.objectid, bytenr, - 0, &logical, &nr, &stripe_len); - BUG_ON(ret); - while (nr--) { - try_lock_extent(&fs_info->pinned_extents, - logical[nr], - logical[nr] + stripe_len - 1, GFP_NOFS); - } - kfree(logical); - } - - return 0; -} - /* * this is only called by cache_block_group, since we could have freed extents * we need to check the pinned_extents for any extents that can't be used yet * since their free space will be released as soon as the transaction commits. */ -static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, +static int add_new_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_fs_info *info, u64 start, u64 end) { - u64 extent_start, extent_end, size, total_added = 0; + u64 extent_start, extent_end, size; int ret; while (start < end) { ret = find_first_extent_bit(&info->pinned_extents, start, &extent_start, &extent_end, - EXTENT_DIRTY|EXTENT_LOCKED); + EXTENT_DIRTY); if (ret) break; @@ -225,7 +167,6 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, start = extent_end + 1; } else if (extent_start > start && extent_start < end) { size = extent_start - start; - total_added += size; ret = btrfs_add_free_space(block_group, start, size); BUG_ON(ret); @@ -237,79 +178,84 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, if (start < end) { size = end - start; - total_added += size; ret = btrfs_add_free_space(block_group, start, size); BUG_ON(ret); } - return total_added; + return 0; } -static int caching_kthread(void *data) +static int remove_sb_from_cache(struct btrfs_root *root, + struct btrfs_block_group_cache *cache) +{ + u64 bytenr; + u64 *logical; + int stripe_len; + int i, nr, ret; + + for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { + bytenr = btrfs_sb_offset(i); + ret = btrfs_rmap_block(&root->fs_info->mapping_tree, + cache->key.objectid, bytenr, 0, + &logical, &nr, &stripe_len); + BUG_ON(ret); + while (nr--) { + btrfs_remove_free_space(cache, logical[nr], + stripe_len); + } + kfree(logical); + } + return 0; +} + +static int cache_block_group(struct btrfs_root *root, + struct btrfs_block_group_cache *block_group) { - struct btrfs_block_group_cache *block_group = data; - struct btrfs_fs_info *fs_info = block_group->fs_info; - u64 last = 0; struct btrfs_path *path; int ret = 0; struct btrfs_key key; struct extent_buffer *leaf; int slot; - u64 total_found = 0; + u64 last; + + if (!block_group) + return 0; - BUG_ON(!fs_info); + root = 
root->fs_info->extent_root; + + if (block_group->cached) + return 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; - atomic_inc(&block_group->space_info->caching_threads); - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); -again: - /* need to make sure the commit_root doesn't disappear */ - down_read(&fs_info->extent_root->commit_root_sem); - + path->reada = 2; /* - * We don't want to deadlock with somebody trying to allocate a new - * extent for the extent root while also trying to search the extent - * root to add free space. So we skip locking and search the commit - * root, since its read-only + * we get into deadlocks with paths held by callers of this function. + * since the alloc_mutex is protecting things right now, just + * skip the locking here */ path->skip_locking = 1; - path->search_commit_root = 1; - path->reada = 2; - + last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); key.objectid = last; key.offset = 0; btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); - ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; while (1) { - smp_mb(); - if (block_group->fs_info->closing > 1) { - last = (u64)-1; - break; - } - leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { - ret = btrfs_next_leaf(fs_info->extent_root, path); + ret = btrfs_next_leaf(root, path); if (ret < 0) goto err; - else if (ret) + if (ret == 0) + continue; + else break; - - if (need_resched()) { - btrfs_release_path(fs_info->extent_root, path); - up_read(&fs_info->extent_root->commit_root_sem); - cond_resched(); - goto again; - } - - continue; } btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid < block_group->key.objectid) @@ -320,59 +266,24 @@ static int caching_kthread(void *data) break; if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { - total_found += add_new_free_space(block_group, - fs_info, last, - key.objectid); - last = key.objectid + key.offset; - } + add_new_free_space(block_group, root->fs_info, last, + key.objectid); - if (total_found > (1024 * 1024 * 2)) { - total_found = 0; - wake_up(&block_group->caching_q); + last = key.objectid + key.offset; } next: path->slots[0]++; } - ret = 0; - total_found += add_new_free_space(block_group, fs_info, last, - block_group->key.objectid + - block_group->key.offset); - - spin_lock(&block_group->lock); - block_group->cached = BTRFS_CACHE_FINISHED; - spin_unlock(&block_group->lock); + add_new_free_space(block_group, root->fs_info, last, + block_group->key.objectid + + block_group->key.offset); + block_group->cached = 1; + remove_sb_from_cache(root, block_group); + ret = 0; err: btrfs_free_path(path); - up_read(&fs_info->extent_root->commit_root_sem); - atomic_dec(&block_group->space_info->caching_threads); - wake_up(&block_group->caching_q); - - return 0; -} - -static int cache_block_group(struct btrfs_block_group_cache *cache) -{ - struct task_struct *tsk; - int ret = 0; - - spin_lock(&cache->lock); - if (cache->cached != BTRFS_CACHE_NO) { - spin_unlock(&cache->lock); - return ret; - } - cache->cached = BTRFS_CACHE_STARTED; - spin_unlock(&cache->lock); - - tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n", - cache->key.objectid); - if (IS_ERR(tsk)) { - ret = PTR_ERR(tsk); - printk(KERN_ERR "error running thread %d\n", ret); - BUG(); - } - return ret; } @@ -2476,29 +2387,13 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, } -static struct 
btrfs_block_group_cache * -next_block_group(struct btrfs_root *root, - struct btrfs_block_group_cache *cache) -{ - struct rb_node *node; - spin_lock(&root->fs_info->block_group_cache_lock); - node = rb_next(&cache->cache_node); - btrfs_put_block_group(cache); - if (node) { - cache = rb_entry(node, struct btrfs_block_group_cache, - cache_node); - atomic_inc(&cache->count); - } else - cache = NULL; - spin_unlock(&root->fs_info->block_group_cache_lock); - return cache; -} - int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group_cache *cache, *entry; + struct rb_node *n; int err = 0; + int werr = 0; struct btrfs_path *path; u64 last = 0; @@ -2507,35 +2402,39 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, return -ENOMEM; while (1) { - if (last == 0) { - err = btrfs_run_delayed_refs(trans, root, - (unsigned long)-1); - BUG_ON(err); - } - - cache = btrfs_lookup_first_block_group(root->fs_info, last); - while (cache) { - if (cache->dirty) - break; - cache = next_block_group(root, cache); - } - if (!cache) { - if (last == 0) + cache = NULL; + spin_lock(&root->fs_info->block_group_cache_lock); + for (n = rb_first(&root->fs_info->block_group_cache_tree); + n; n = rb_next(n)) { + entry = rb_entry(n, struct btrfs_block_group_cache, + cache_node); + if (entry->dirty) { + cache = entry; break; - last = 0; - continue; + } } + spin_unlock(&root->fs_info->block_group_cache_lock); + + if (!cache) + break; cache->dirty = 0; - last = cache->key.objectid + cache->key.offset; + last += cache->key.offset; - err = write_one_cache_group(trans, root, path, cache); - BUG_ON(err); - btrfs_put_block_group(cache); + err = write_one_cache_group(trans, root, + path, cache); + /* + * if we fail to write the cache group, we want + * to keep it marked dirty in hopes that a later + * write will work + */ + if (err) { + werr = err; + continue; + } } - btrfs_free_path(path); - return 0; + return werr; } int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) @@ -2585,7 +2484,6 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->force_alloc = 0; *space_info = found; list_add_rcu(&found->list, &info->space_info); - atomic_set(&found->caching_threads, 0); return 0; } @@ -3049,9 +2947,13 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, struct btrfs_block_group_cache *cache; struct btrfs_fs_info *fs_info = root->fs_info; - if (pin) + if (pin) { set_extent_dirty(&fs_info->pinned_extents, bytenr, bytenr + num - 1, GFP_NOFS); + } else { + clear_extent_dirty(&fs_info->pinned_extents, + bytenr, bytenr + num - 1, GFP_NOFS); + } while (num > 0) { cache = btrfs_lookup_block_group(fs_info, bytenr); @@ -3067,34 +2969,14 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, spin_unlock(&cache->space_info->lock); fs_info->total_pinned += len; } else { - int unpin = 0; - - /* - * in order to not race with the block group caching, we - * only want to unpin the extent if we are cached. If - * we aren't cached, we want to start async caching this - * block group so we can free the extent the next time - * around. 
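
The pin/unpin bookkeeping that btrfs_update_pinned_extents performs can be summarized in a small model. This is an illustrative sketch only; the demo_group type and its flat counters are invented here, while the real code tracks the same quantities across fs_info, the space_info and the block group under their spinlocks:

#include <stdint.h>
#include <stdbool.h>

struct demo_group {
        uint64_t pinned;       /* bytes pinned in this block group */
        uint64_t free_space;   /* bytes known to the free space cache */
        bool cached;           /* has the free space cache been built? */
};

static void demo_update_pinned(struct demo_group *g, uint64_t len, bool pin)
{
        if (pin) {
                g->pinned += len;
        } else {
                g->pinned -= len;
                /* only a cached group can hand the range back as free space */
                if (g->cached)
                        g->free_space += len;
        }
}
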
- */ spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); - unpin = (cache->cached == BTRFS_CACHE_FINISHED); - if (likely(unpin)) { - cache->pinned -= len; - cache->space_info->bytes_pinned -= len; - fs_info->total_pinned -= len; - } + cache->pinned -= len; + cache->space_info->bytes_pinned -= len; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - - if (likely(unpin)) - clear_extent_dirty(&fs_info->pinned_extents, - bytenr, bytenr + len -1, - GFP_NOFS); - else - cache_block_group(cache); - - if (unpin) + fs_info->total_pinned -= len; + if (cache->cached) btrfs_add_free_space(cache, bytenr, len); } btrfs_put_block_group(cache); @@ -3148,7 +3030,6 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) &start, &end, EXTENT_DIRTY); if (ret) break; - set_extent_dirty(copy, start, end, GFP_NOFS); last = end + 1; } @@ -3177,7 +3058,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, cond_resched(); } - return ret; } @@ -3555,45 +3435,6 @@ static u64 stripe_align(struct btrfs_root *root, u64 val) return ret; } -/* - * when we wait for progress in the block group caching, its because - * our allocation attempt failed at least once. So, we must sleep - * and let some progress happen before we try again. - * - * This function will sleep at least once waiting for new free space to - * show up, and then it will check the block group free space numbers - * for our min num_bytes. Another option is to have it go ahead - * and look in the rbtree for a free extent of a given size, but this - * is a good start. - */ -static noinline int -wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, - u64 num_bytes) -{ - DEFINE_WAIT(wait); - - prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE); - - if (block_group_cache_done(cache)) { - finish_wait(&cache->caching_q, &wait); - return 0; - } - schedule(); - finish_wait(&cache->caching_q, &wait); - - wait_event(cache->caching_q, block_group_cache_done(cache) || - (cache->free_space >= num_bytes)); - return 0; -} - -enum btrfs_loop_type { - LOOP_CACHED_ONLY = 0, - LOOP_CACHING_NOWAIT = 1, - LOOP_CACHING_WAIT = 2, - LOOP_ALLOC_CHUNK = 3, - LOOP_NO_EMPTY_SIZE = 4, -}; - /* * walks the btree of allocated extents and find a hole of a given size. * The key ins is changed to record the hole: @@ -3619,7 +3460,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_space_info *space_info; int last_ptr_loop = 0; int loop = 0; - bool found_uncached_bg = false; WARN_ON(num_bytes < root->sectorsize); btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); @@ -3651,18 +3491,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, search_start = max(search_start, first_logical_byte(root, 0)); search_start = max(search_start, hint_byte); - if (!last_ptr) + if (!last_ptr) { empty_cluster = 0; + loop = 1; + } if (search_start == hint_byte) { block_group = btrfs_lookup_block_group(root->fs_info, search_start); - /* - * we don't want to use the block group if it doesn't match our - * allocation bits, or if its not cached. 
- */ - if (block_group && block_group_bits(block_group, data) && - block_group_cache_done(block_group)) { + if (block_group && block_group_bits(block_group, data)) { down_read(&space_info->groups_sem); if (list_empty(&block_group->list) || block_group->ro) { @@ -3685,35 +3522,21 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, down_read(&space_info->groups_sem); list_for_each_entry(block_group, &space_info->block_groups, list) { u64 offset; - int cached; atomic_inc(&block_group->count); search_start = block_group->key.objectid; have_block_group: - if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { - /* - * we want to start caching kthreads, but not too many - * right off the bat so we don't overwhelm the system, - * so only start them if there are less than 2 and we're - * in the initial allocation phase. - */ - if (loop > LOOP_CACHING_NOWAIT || - atomic_read(&space_info->caching_threads) < 2) { - ret = cache_block_group(block_group); - BUG_ON(ret); + if (unlikely(!block_group->cached)) { + mutex_lock(&block_group->cache_mutex); + ret = cache_block_group(root, block_group); + mutex_unlock(&block_group->cache_mutex); + if (ret) { + btrfs_put_block_group(block_group); + break; } } - cached = block_group_cache_done(block_group); - if (unlikely(!cached)) { - found_uncached_bg = true; - - /* if we only want cached bgs, loop */ - if (loop == LOOP_CACHED_ONLY) - goto loop; - } - if (unlikely(block_group->ro)) goto loop; @@ -3792,21 +3615,14 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, spin_unlock(&last_ptr->refill_lock); goto checks; } - } else if (!cached && loop > LOOP_CACHING_NOWAIT) { - spin_unlock(&last_ptr->refill_lock); - - wait_block_group_cache_progress(block_group, - num_bytes + empty_cluster + empty_size); - goto have_block_group; } - /* * at this point we either didn't find a cluster * or we weren't able to allocate a block from our * cluster. 
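
The retry ladder this refers to (spelled out in the loop == 0/1/2 comment a little further down) escalates from a plain clustered search, to forcing a chunk allocation, to dropping the empty_size/empty_cluster slack. A compressed, self-contained sketch under invented demo_* names, with the per-group search reduced to a stub:

#include <stdint.h>
#include <errno.h>

struct demo_alloc_ctx {
        uint64_t empty_size;     /* extra slack requested around the hole */
        uint64_t empty_cluster;  /* minimum cluster size wanted */
};

static int demo_search_groups(struct demo_alloc_ctx *c, uint64_t bytes)
{
        (void)c;
        (void)bytes;
        return -1;      /* placeholder: the real search walks every block group */
}

static void demo_alloc_chunk(struct demo_alloc_ctx *c)
{
        (void)c;        /* placeholder standing in for do_chunk_alloc() */
}

static int demo_find_free_extent(struct demo_alloc_ctx *c, uint64_t bytes)
{
        int loop;

        for (loop = 0; loop < 3; loop++) {
                if (loop == 1)          /* second pass: add a new chunk first */
                        demo_alloc_chunk(c);
                if (loop == 2)          /* final pass: drop the slack demands */
                        c->empty_size = c->empty_cluster = 0;
                if (demo_search_groups(c, bytes) == 0)
                        return 0;
        }
        return -ENOSPC;
}
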
Free the cluster we've been trying * to use, and go to the next block group */ - if (loop < LOOP_NO_EMPTY_SIZE) { + if (loop < 2) { btrfs_return_cluster_to_free_space(NULL, last_ptr); spin_unlock(&last_ptr->refill_lock); @@ -3817,17 +3633,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, offset = btrfs_find_space_for_alloc(block_group, search_start, num_bytes, empty_size); - if (!offset && (cached || (!cached && - loop == LOOP_CACHING_NOWAIT))) { + if (!offset) goto loop; - } else if (!offset && (!cached && - loop > LOOP_CACHING_NOWAIT)) { - wait_block_group_cache_progress(block_group, - num_bytes + empty_size); - goto have_block_group; - } checks: search_start = stripe_align(root, offset); + /* move on to the next group */ if (search_start + num_bytes >= search_end) { btrfs_add_free_space(block_group, offset, num_bytes); @@ -3873,26 +3683,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, } up_read(&space_info->groups_sem); - /* LOOP_CACHED_ONLY, only search fully cached block groups - * LOOP_CACHING_NOWAIT, search partially cached block groups, but - * dont wait foR them to finish caching - * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching - * LOOP_ALLOC_CHUNK, force a chunk allocation and try again - * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try - * again + /* loop == 0, try to find a clustered alloc in every block group + * loop == 1, try again after forcing a chunk allocation + * loop == 2, set empty_size and empty_cluster to 0 and try again */ - if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && - (found_uncached_bg || empty_size || empty_cluster || - allowed_chunk_alloc)) { - if (found_uncached_bg) { - found_uncached_bg = false; - if (loop < LOOP_CACHING_WAIT) { - loop++; - goto search; - } - } - - if (loop == LOOP_ALLOC_CHUNK) { + if (!ins->objectid && loop < 3 && + (empty_size || empty_cluster || allowed_chunk_alloc)) { + if (loop >= 2) { empty_size = 0; empty_cluster = 0; } @@ -3905,7 +3702,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, space_info->force_alloc = 1; } - if (loop < LOOP_NO_EMPTY_SIZE) { + if (loop < 3) { loop++; goto search; } @@ -4001,7 +3798,7 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, num_bytes, data, 1); goto again; } - if (ret == -ENOSPC) { + if (ret) { struct btrfs_space_info *sinfo; sinfo = __find_space_info(root->fs_info, data); @@ -4009,6 +3806,7 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, "wanted %llu\n", (unsigned long long)data, (unsigned long long)num_bytes); dump_space_info(sinfo, num_bytes); + BUG(); } return ret; @@ -4046,9 +3844,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, empty_size, hint_byte, search_end, ins, data); - if (!ret) - update_reserved_extents(root, ins->objectid, ins->offset, 1); - + update_reserved_extents(root, ins->objectid, ins->offset, 1); return ret; } @@ -4210,9 +4006,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group; block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); - cache_block_group(block_group); - wait_event(block_group->caching_q, - block_group_cache_done(block_group)); + mutex_lock(&block_group->cache_mutex); + cache_block_group(root, block_group); + mutex_unlock(&block_group->cache_mutex); ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset); @@ -4243,8 
+4039,7 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans, ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, empty_size, hint_byte, search_end, ins, 0); - if (ret) - return ret; + BUG_ON(ret); if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { if (parent == 0) @@ -7160,16 +6955,11 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) &info->block_group_cache_tree); spin_unlock(&info->block_group_cache_lock); + btrfs_remove_free_space_cache(block_group); down_write(&block_group->space_info->groups_sem); list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); - if (block_group->cached == BTRFS_CACHE_STARTED) - wait_event(block_group->caching_q, - block_group_cache_done(block_group)); - - btrfs_remove_free_space_cache(block_group); - WARN_ON(atomic_read(&block_group->count) != 1); kfree(block_group); @@ -7235,19 +7025,9 @@ int btrfs_read_block_groups(struct btrfs_root *root) atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); spin_lock_init(&cache->tree_lock); - cache->fs_info = info; - init_waitqueue_head(&cache->caching_q); + mutex_init(&cache->cache_mutex); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); - - /* - * we only want to have 32k of ram per block group for keeping - * track of free space, and if we pass 1/2 of that we want to - * start converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); - read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); @@ -7256,26 +7036,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) key.objectid = found_key.objectid + found_key.offset; btrfs_release_path(root, path); cache->flags = btrfs_block_group_flags(&cache->item); - cache->sectorsize = root->sectorsize; - - remove_sb_from_cache(root, cache); - - /* - * check for two cases, either we are full, and therefore - * don't need to bother with the caching work since we won't - * find any space, or we are empty, and we can just add all - * the space in and be done with it. This saves us _alot_ of - * time, particularly in the full case. 
- */ - if (found_key.offset == btrfs_block_group_used(&cache->item)) { - cache->cached = BTRFS_CACHE_FINISHED; - } else if (btrfs_block_group_used(&cache->item) == 0) { - cache->cached = BTRFS_CACHE_FINISHED; - add_new_free_space(cache, root->fs_info, - found_key.objectid, - found_key.objectid + - found_key.offset); - } ret = update_space_info(info, cache->flags, found_key.offset, btrfs_block_group_used(&cache->item), @@ -7319,19 +7079,10 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->key.objectid = chunk_offset; cache->key.offset = size; cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; - cache->sectorsize = root->sectorsize; - - /* - * we only want to have 32k of ram per block group for keeping track - * of free space, and if we pass 1/2 of that we want to start - * converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); spin_lock_init(&cache->tree_lock); - init_waitqueue_head(&cache->caching_q); + mutex_init(&cache->cache_mutex); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); @@ -7340,12 +7091,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->flags = type; btrfs_set_block_group_flags(&cache->item, type); - cache->cached = BTRFS_CACHE_FINISHED; - remove_sb_from_cache(root, cache); - - add_new_free_space(cache, root->fs_info, chunk_offset, - chunk_offset + size); - ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, &cache->space_info); BUG_ON(ret); @@ -7404,7 +7149,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, rb_erase(&block_group->cache_node, &root->fs_info->block_group_cache_tree); spin_unlock(&root->fs_info->block_group_cache_lock); - + btrfs_remove_free_space_cache(block_group); down_write(&block_group->space_info->groups_sem); /* * we must use list_del_init so people can check to see if they @@ -7413,18 +7158,11 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, list_del_init(&block_group->list); up_write(&block_group->space_info->groups_sem); - if (block_group->cached == BTRFS_CACHE_STARTED) - wait_event(block_group->caching_q, - block_group_cache_done(block_group)); - - btrfs_remove_free_space_cache(block_group); - spin_lock(&block_group->space_info->lock); block_group->space_info->total_bytes -= block_group->key.offset; block_group->space_info->bytes_readonly -= block_group->key.offset; spin_unlock(&block_group->space_info->lock); - - btrfs_clear_space_info_full(root->fs_info); + block_group->space_info->full = 0; btrfs_put_block_group(block_group); btrfs_put_block_group(block_group); diff --git a/trunk/fs/btrfs/free-space-cache.c b/trunk/fs/btrfs/free-space-cache.c index af99b78b288e..4538e48581a5 100644 --- a/trunk/fs/btrfs/free-space-cache.c +++ b/trunk/fs/btrfs/free-space-cache.c @@ -16,46 +16,45 @@ * Boston, MA 021110-1307, USA. 
*/ -#include <linux/pagemap.h> #include <linux/sched.h> -#include <linux/math64.h> #include "ctree.h" #include "free-space-cache.h" #include "transaction.h" -#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) -#define MAX_CACHE_BYTES_PER_GIG (32 * 1024) +struct btrfs_free_space { + struct rb_node bytes_index; + struct rb_node offset_index; + u64 offset; + u64 bytes; +}; -static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, - u64 offset) +static int tree_insert_offset(struct rb_root *root, u64 offset, + struct rb_node *node) { - BUG_ON(offset < bitmap_start); - offset -= bitmap_start; - return (unsigned long)(div64_u64(offset, sectorsize)); -} + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct btrfs_free_space *info; -static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) -{ - return (unsigned long)(div64_u64(bytes, sectorsize)); -} + while (*p) { + parent = *p; + info = rb_entry(parent, struct btrfs_free_space, offset_index); -static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, - u64 offset) -{ - u64 bitmap_start; - u64 bytes_per_bitmap; + if (offset < info->offset) + p = &(*p)->rb_left; + else if (offset > info->offset) + p = &(*p)->rb_right; + else + return -EEXIST; + } - bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; - bitmap_start = offset - block_group->key.objectid; - bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); - bitmap_start *= bytes_per_bitmap; - bitmap_start += block_group->key.objectid; + rb_link_node(node, parent, p); + rb_insert_color(node, root); - return bitmap_start; + return 0; } -static int tree_insert_offset(struct rb_root *root, u64 offset, - struct rb_node *node, int bitmap) +static int tree_insert_bytes(struct rb_root *root, u64 bytes, + struct rb_node *node) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; @@ -63,34 +62,12 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, while (*p) { parent = *p; - info = rb_entry(parent, struct btrfs_free_space, offset_index); + info = rb_entry(parent, struct btrfs_free_space, bytes_index); - if (offset < info->offset) { + if (bytes < info->bytes) p = &(*p)->rb_left; - } else if (offset > info->offset) { + else p = &(*p)->rb_right; - } else { - /* - * we could have a bitmap entry and an extent entry - * share the same offset. If this is the case, we want - * the extent entry to always be found first if we do a - * linear search through the tree, since we want to have - * the quickest allocation time, and allocating from an - * extent is faster than allocating from a bitmap. So - * if we're inserting a bitmap and we find an entry at - * this offset, we want to go right, or after this entry - * logically. If we are inserting an extent and we've - * found a bitmap, we want to go left, or before - * logically. - */ - if (bitmap) { - WARN_ON(info->bitmap); - p = &(*p)->rb_right; - } else { - WARN_ON(!info->bitmap); - p = &(*p)->rb_left; - } - } } rb_link_node(node, parent, p); @@ -102,143 +79,110 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, /* * searches the tree for the given offset. * - * fuzzy - If this is set, then we are trying to make an allocation, and we just - * want a section that has at least bytes size and comes at or after the given - * offset. + * fuzzy == 1: this is used for allocations where we are given a hint of where + * to look for free space. Because the hint may not be completely on an offset + * mark, or the hint may no longer point to free space, we need to fudge our + * results a bit.
So we look for free space starting at or after offset with at + least bytes size. We prefer to find as close to the given offset as we can. + Also if the offset is within a free space range, then we will return the free + space that contains the given offset, which means we can return a free space + chunk with an offset before the provided offset. + + fuzzy == 0: this is just a normal tree search. Give us the free space that + starts at the given offset which is at least bytes size, and if it's not there + return NULL. */ -static struct btrfs_free_space * -tree_search_offset(struct btrfs_block_group_cache *block_group, - u64 offset, int bitmap_only, int fuzzy) +static struct btrfs_free_space *tree_search_offset(struct rb_root *root, + u64 offset, u64 bytes, + int fuzzy) { - struct rb_node *n = block_group->free_space_offset.rb_node; - struct btrfs_free_space *entry, *prev = NULL; - - /* find entry that is closest to the 'offset' */ - while (1) { - if (!n) { - entry = NULL; - break; - } + struct rb_node *n = root->rb_node; + struct btrfs_free_space *entry, *ret = NULL; + while (n) { entry = rb_entry(n, struct btrfs_free_space, offset_index); - prev = entry; - if (offset < entry->offset) + if (offset < entry->offset) { + if (fuzzy && + (!ret || entry->offset < ret->offset) && + (bytes <= entry->bytes)) + ret = entry; n = n->rb_left; - else if (offset > entry->offset) + } else if (offset > entry->offset) { + if (fuzzy && + (entry->offset + entry->bytes - 1) >= offset && + bytes <= entry->bytes) { + ret = entry; + break; + } n = n->rb_right; - else + } else { + if (bytes > entry->bytes) { + n = n->rb_right; + continue; + } + ret = entry; break; + } } - if (bitmap_only) { - if (!entry) - return NULL; - if (entry->bitmap) - return entry; + return ret; +} - /* - * bitmap entry and extent entry may share same offset, - * in that case, bitmap entry comes after extent entry. - */ - n = rb_next(n); - if (!n) - return NULL; - entry = rb_entry(n, struct btrfs_free_space, offset_index); - if (entry->offset != offset) - return NULL; +/* + * return a chunk at least bytes size, as close to offset as we can get. + */ +static struct btrfs_free_space *tree_search_bytes(struct rb_root *root, + u64 offset, u64 bytes) +{ + struct rb_node *n = root->rb_node; + struct btrfs_free_space *entry, *ret = NULL; + + while (n) { + entry = rb_entry(n, struct btrfs_free_space, bytes_index); - WARN_ON(!entry->bitmap); - return entry; - } else if (entry) { - if (entry->bitmap) { /* - * if previous extent entry covers the offset, - * we should return it instead of the bitmap entry + * We prefer to get a hole size as close to the size we + * are asking for so we don't take small slivers out of + * huge holes, but we also want to get as close to the + * offset as possible so we don't have a whole lot of + * fragmentation. */
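
The lookup contract described in the two comments above can be modeled compactly. A self-contained sketch using a sorted array in place of the rbtree, with invented demo_* names; fuzzy == 0 demands an entry starting exactly at the offset, fuzzy == 1 also accepts an entry covering the hint or the closest suitable entry past it:

#include <stdint.h>
#include <stddef.h>

struct demo_space { uint64_t offset, bytes; };

/* v[] is sorted by offset; mirrors the documented tree_search_offset contract */
static const struct demo_space *
demo_search_offset(const struct demo_space *v, size_t n,
                   uint64_t offset, uint64_t bytes, int fuzzy)
{
        const struct demo_space *after = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                const struct demo_space *e = &v[i];

                if (e->bytes < bytes)
                        continue;
                if (e->offset == offset)
                        return e;       /* exact start: fine for both modes */
                if (!fuzzy)
                        continue;
                if (e->offset < offset && offset < e->offset + e->bytes)
                        return e;       /* range covering the hint */
                if (e->offset > offset && !after)
                        after = e;      /* closest suitable entry past the hint */
        }
        return fuzzy ? after : NULL;
}
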
- n = &entry->offset_index; - while (1) { - n = rb_prev(n); - if (!n) - break; - prev = rb_entry(n, struct btrfs_free_space, - offset_index); - if (!prev->bitmap) { - if (prev->offset + prev->bytes > offset) - entry = prev; - break; - } + if (offset <= entry->offset) { + if (!ret) + ret = entry; + else if (entry->bytes < ret->bytes) + ret = entry; + else if (entry->offset < ret->offset) + ret = entry; } - } - return entry; - } - - if (!prev) - return NULL; - - /* find last entry before the 'offset' */ - entry = prev; - if (entry->offset > offset) { - n = rb_prev(&entry->offset_index); - if (n) { - entry = rb_entry(n, struct btrfs_free_space, - offset_index); - BUG_ON(entry->offset > offset); + n = n->rb_left; + } else if (bytes > entry->bytes) { + n = n->rb_right; } else { - if (fuzzy) - return entry; - else - return NULL; + /* + * Ok we may have multiple chunks of the wanted size, + * so we don't want to take the first one we find, we + * want to take the one closest to our given offset, so + * keep searching just in case there's a better match. + */ + n = n->rb_right; + if (offset > entry->offset) + continue; + else if (!ret || entry->offset < ret->offset) + ret = entry; } } - if (entry->bitmap) { - n = &entry->offset_index; - while (1) { - n = rb_prev(n); - if (!n) - break; - prev = rb_entry(n, struct btrfs_free_space, - offset_index); - if (!prev->bitmap) { - if (prev->offset + prev->bytes > offset) - return prev; - break; - } - } - if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) - return entry; - } else if (entry->offset + entry->bytes > offset) - return entry; - - if (!fuzzy) - return NULL; - - while (1) { - if (entry->bitmap) { - if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) - break; - } else { - if (entry->offset + entry->bytes > offset) - break; - } - - n = rb_next(&entry->offset_index); - if (!n) - return NULL; - entry = rb_entry(n, struct btrfs_free_space, offset_index); - } - return entry; + return ret; } static void unlink_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info) { rb_erase(&info->offset_index, &block_group->free_space_offset); - block_group->free_extents--; - block_group->free_space -= info->bytes; + rb_erase(&info->bytes_index, &block_group->free_space_bytes); } static int link_free_space(struct btrfs_block_group_cache *block_group, @@ -246,314 +190,17 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, { int ret = 0; - BUG_ON(!info->bitmap && !info->bytes); + + BUG_ON(!info->bytes); ret = tree_insert_offset(&block_group->free_space_offset, info->offset, - &info->offset_index, (info->bitmap != NULL)); + &info->offset_index); if (ret) return ret; - block_group->free_space += info->bytes; - block_group->free_extents++; - return ret; -} - -static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) -{ - u64 max_bytes, possible_bytes; - - /* - * The goal is to keep the total amount of memory used per 1gb of space - * at or below 32k, so we need to adjust how much memory we allow to be - * used by extent based free space tracking - */ - max_bytes = MAX_CACHE_BYTES_PER_GIG * - (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); - - possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) + - (sizeof(struct btrfs_free_space) * - block_group->extents_thresh); - - if (possible_bytes > max_bytes) { - int extent_bytes = max_bytes - - (block_group->total_bitmaps * PAGE_CACHE_SIZE); - - if (extent_bytes <= 0) { -
block_group->extents_thresh = 0; - return; - } - - block_group->extents_thresh = extent_bytes / - (sizeof(struct btrfs_free_space)); - } -} - -static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info, u64 offset, - u64 bytes) -{ - unsigned long start, end; - unsigned long i; - - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); - - for (i = start; i < end; i++) - clear_bit(i, info->bitmap); - - info->bytes -= bytes; - block_group->free_space -= bytes; -} - -static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info, u64 offset, - u64 bytes) -{ - unsigned long start, end; - unsigned long i; - - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); - - for (i = start; i < end; i++) - set_bit(i, info->bitmap); - - info->bytes += bytes; - block_group->free_space += bytes; -} - -static int search_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *bitmap_info, u64 *offset, - u64 *bytes) -{ - unsigned long found_bits = 0; - unsigned long bits, i; - unsigned long next_zero; - - i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, - max_t(u64, *offset, bitmap_info->offset)); - bits = bytes_to_bits(*bytes, block_group->sectorsize); - - for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); - i < BITS_PER_BITMAP; - i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) { - next_zero = find_next_zero_bit(bitmap_info->bitmap, - BITS_PER_BITMAP, i); - if ((next_zero - i) >= bits) { - found_bits = next_zero - i; - break; - } - i = next_zero; - } - - if (found_bits) { - *offset = (u64)(i * block_group->sectorsize) + - bitmap_info->offset; - *bytes = (u64)(found_bits) * block_group->sectorsize; - return 0; - } - - return -1; -} - -static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache - *block_group, u64 *offset, - u64 *bytes, int debug) -{ - struct btrfs_free_space *entry; - struct rb_node *node; - int ret; - - if (!block_group->free_space_offset.rb_node) - return NULL; - - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, *offset), - 0, 1); - if (!entry) - return NULL; - - for (node = &entry->offset_index; node; node = rb_next(node)) { - entry = rb_entry(node, struct btrfs_free_space, offset_index); - if (entry->bytes < *bytes) - continue; - - if (entry->bitmap) { - ret = search_bitmap(block_group, entry, offset, bytes); - if (!ret) - return entry; - continue; - } - - *offset = entry->offset; - *bytes = entry->bytes; - return entry; - } - - return NULL; -} - -static void add_new_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info, u64 offset) -{ - u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; - int max_bitmaps = (int)div64_u64(block_group->key.offset + - bytes_per_bg - 1, bytes_per_bg); - BUG_ON(block_group->total_bitmaps >= max_bitmaps); - - info->offset = offset_to_bitmap(block_group, offset); - link_free_space(block_group, info); - block_group->total_bitmaps++; - - recalculate_thresholds(block_group); -} - -static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *bitmap_info, - u64 *offset, u64 *bytes) -{ - u64 end; - -again: - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * 
block_group->sectorsize) - 1; - - if (*offset > bitmap_info->offset && *offset + *bytes > end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, - end - *offset + 1); - *bytes -= end - *offset + 1; - *offset = end + 1; - } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); - *bytes = 0; - } - - if (*bytes) { - if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } - - bitmap_info = tree_search_offset(block_group, - offset_to_bitmap(block_group, - *offset), - 1, 0); - if (!bitmap_info) - return -EINVAL; - - if (!bitmap_info->bitmap) - return -EAGAIN; - - goto again; - } else if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } - - return 0; -} - -static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) -{ - struct btrfs_free_space *bitmap_info; - int added = 0; - u64 bytes, offset, end; - int ret; - - /* - * If we are below the extents threshold then we can add this as an - * extent, and don't have to deal with the bitmap - */ - if (block_group->free_extents < block_group->extents_thresh && - info->bytes > block_group->sectorsize * 4) - return 0; - - /* - * some block groups are so tiny they can't be enveloped by a bitmap, so - * don't even bother to create a bitmap for this - */ - if (BITS_PER_BITMAP * block_group->sectorsize > - block_group->key.offset) - return 0; - - bytes = info->bytes; - offset = info->offset; - -again: - bitmap_info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), - 1, 0); - if (!bitmap_info) { - BUG_ON(added); - goto new_bitmap; - } - - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize); - - if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(block_group, bitmap_info, offset, - end - offset); - bytes -= end - offset; - offset = end; - added = 0; - } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(block_group, bitmap_info, offset, bytes); - bytes = 0; - } else { - BUG(); - } - - if (!bytes) { - ret = 1; - goto out; - } else - goto again; - -new_bitmap: - if (info && info->bitmap) { - add_new_bitmap(block_group, info, offset); - added = 1; - info = NULL; - goto again; - } else { - spin_unlock(&block_group->tree_lock); - - /* no pre-allocated info, allocate a new one */ - if (!info) { - info = kzalloc(sizeof(struct btrfs_free_space), - GFP_NOFS); - if (!info) { - spin_lock(&block_group->tree_lock); - ret = -ENOMEM; - goto out; - } - } - - /* allocate the bitmap */ - info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); - spin_lock(&block_group->tree_lock); - if (!info->bitmap) { - ret = -ENOMEM; - goto out; - } - goto again; - } - -out: - if (info) { - if (info->bitmap) - kfree(info->bitmap); - kfree(info); - } + ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes, + &info->bytes_index); + if (ret) + return ret; return ret; } @@ -561,8 +208,8 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { - struct btrfs_free_space *right_info = NULL; - struct btrfs_free_space *left_info = NULL; + struct 
btrfs_free_space *right_info; + struct btrfs_free_space *left_info; struct btrfs_free_space *info = NULL; int ret = 0; @@ -580,38 +227,18 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, * are adding, if there is remove that struct and add a new one to * cover the entire range */ - right_info = tree_search_offset(block_group, offset + bytes, 0, 0); - if (right_info && rb_prev(&right_info->offset_index)) - left_info = rb_entry(rb_prev(&right_info->offset_index), - struct btrfs_free_space, offset_index); - else - left_info = tree_search_offset(block_group, offset - 1, 0, 0); + right_info = tree_search_offset(&block_group->free_space_offset, + offset+bytes, 0, 0); + left_info = tree_search_offset(&block_group->free_space_offset, + offset-1, 0, 1); - /* - * If there was no extent directly to the left or right of this new - * extent then we know we're going to have to allocate a new extent, so - * before we do that see if we need to drop this into a bitmap - */ - if ((!left_info || left_info->bitmap) && - (!right_info || right_info->bitmap)) { - ret = insert_into_bitmap(block_group, info); - - if (ret < 0) { - goto out; - } else if (ret) { - ret = 0; - goto out; - } - } - - if (right_info && !right_info->bitmap) { + if (right_info) { unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); } - if (left_info && !left_info->bitmap && - left_info->offset + left_info->bytes == offset) { + if (left_info && left_info->offset + left_info->bytes == offset) { unlink_free_space(block_group, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; @@ -621,11 +248,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, ret = link_free_space(block_group, info); if (ret) kfree(info); -out: + spin_unlock(&block_group->tree_lock); if (ret) { - printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); + printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); BUG_ON(ret == -EEXIST); } @@ -636,65 +263,40 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { struct btrfs_free_space *info; - struct btrfs_free_space *next_info = NULL; int ret = 0; spin_lock(&block_group->tree_lock); -again: - info = tree_search_offset(block_group, offset, 0, 0); - if (!info) { - WARN_ON(1); - goto out_lock; - } - - if (info->bytes < bytes && rb_next(&info->offset_index)) { - u64 end; - next_info = rb_entry(rb_next(&info->offset_index), - struct btrfs_free_space, - offset_index); - - if (next_info->bitmap) - end = next_info->offset + BITS_PER_BITMAP * - block_group->sectorsize - 1; - else - end = next_info->offset + next_info->bytes; - - if (next_info->bytes < bytes || - next_info->offset > offset || offset > end) { - printk(KERN_CRIT "Found free space at %llu, size %llu," - " trying to use %llu\n", - (unsigned long long)info->offset, - (unsigned long long)info->bytes, - (unsigned long long)bytes); + info = tree_search_offset(&block_group->free_space_offset, offset, 0, + 1); + if (info && info->offset == offset) { + if (info->bytes < bytes) { + printk(KERN_ERR "Found free space at %llu, size %llu," + " trying to use %llu\n", + (unsigned long long)info->offset, + (unsigned long long)info->bytes, + (unsigned long long)bytes); WARN_ON(1); ret = -EINVAL; - goto out_lock; + spin_unlock(&block_group->tree_lock); + goto out; } - - info = next_info; - } - - if (info->bytes == bytes) { unlink_free_space(block_group, info); - if (info->bitmap) { - kfree(info->bitmap); -
block_group->total_bitmaps--; + + if (info->bytes == bytes) { + kfree(info); + spin_unlock(&block_group->tree_lock); + goto out; } - kfree(info); - goto out_lock; - } - if (!info->bitmap && info->offset == offset) { - unlink_free_space(block_group, info); info->offset += bytes; info->bytes -= bytes; - link_free_space(block_group, info); - goto out_lock; - } - if (!info->bitmap && info->offset <= offset && - info->offset + info->bytes >= offset + bytes) { + ret = link_free_space(block_group, info); + spin_unlock(&block_group->tree_lock); + BUG_ON(ret); + } else if (info && info->offset < offset && + info->offset + info->bytes >= offset + bytes) { u64 old_start = info->offset; /* * we're freeing space in the middle of the info, @@ -710,9 +312,7 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, info->offset = offset + bytes; info->bytes = old_end - info->offset; ret = link_free_space(block_group, info); - WARN_ON(ret); - if (ret) - goto out_lock; + BUG_ON(ret); } else { /* the hole we're creating ends at the end * of the info struct, just free the info @@ -720,22 +320,32 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, kfree(info); } spin_unlock(&block_group->tree_lock); - - /* step two, insert a new info struct to cover - * anything before the hole + /* step two, insert a new info struct to cover anything + * before the hole */ ret = btrfs_add_free_space(block_group, old_start, offset - old_start); - WARN_ON(ret); - goto out; + BUG_ON(ret); + } else { + spin_unlock(&block_group->tree_lock); + if (!info) { + printk(KERN_ERR "couldn't find space %llu to free\n", + (unsigned long long)offset); + printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n", + block_group->cached, + (unsigned long long)block_group->key.objectid, + (unsigned long long)block_group->key.offset); + btrfs_dump_free_space(block_group, bytes); + } else if (info) { + printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, " + "but wanted offset=%llu bytes=%llu\n", + (unsigned long long)info->offset, + (unsigned long long)info->bytes, + (unsigned long long)offset, + (unsigned long long)bytes); + } + WARN_ON(1); } - - ret = remove_from_bitmap(block_group, info, &offset, &bytes); - if (ret == -EAGAIN) - goto again; - BUG_ON(ret); -out_lock: - spin_unlock(&block_group->tree_lock); out: return ret; } @@ -751,13 +361,10 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, info = rb_entry(n, struct btrfs_free_space, offset_index); if (info->bytes >= bytes) count++; - printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n", + printk(KERN_ERR "entry offset %llu, bytes %llu\n", (unsigned long long)info->offset, - (unsigned long long)info->bytes, - (info->bitmap) ? "yes" : "no"); + (unsigned long long)info->bytes); } - printk(KERN_INFO "block group has cluster?: %s\n", - list_empty(&block_group->cluster_list) ? 
"no" : "yes"); printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" "\n", count); } @@ -790,35 +397,26 @@ __btrfs_return_cluster_to_free_space( { struct btrfs_free_space *entry; struct rb_node *node; - bool bitmap; spin_lock(&cluster->lock); if (cluster->block_group != block_group) goto out; - bitmap = cluster->points_to_bitmap; - cluster->block_group = NULL; cluster->window_start = 0; - list_del_init(&cluster->block_group_list); - cluster->points_to_bitmap = false; - - if (bitmap) - goto out; - node = rb_first(&cluster->root); - while (node) { + while(node) { entry = rb_entry(node, struct btrfs_free_space, offset_index); node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); - BUG_ON(entry->bitmap); - tree_insert_offset(&block_group->free_space_offset, - entry->offset, &entry->offset_index, 0); + link_free_space(block_group, entry); } - cluster->root.rb_node = NULL; + list_del_init(&cluster->block_group_list); + btrfs_put_block_group(cluster->block_group); + cluster->block_group = NULL; + cluster->root.rb_node = NULL; out: spin_unlock(&cluster->lock); - btrfs_put_block_group(block_group); return 0; } @@ -827,28 +425,20 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) struct btrfs_free_space *info; struct rb_node *node; struct btrfs_free_cluster *cluster; - struct list_head *head; + struct btrfs_free_cluster *safe; spin_lock(&block_group->tree_lock); - while ((head = block_group->cluster_list.next) != - &block_group->cluster_list) { - cluster = list_entry(head, struct btrfs_free_cluster, - block_group_list); + + list_for_each_entry_safe(cluster, safe, &block_group->cluster_list, + block_group_list) { WARN_ON(cluster->block_group != block_group); __btrfs_return_cluster_to_free_space(block_group, cluster); - if (need_resched()) { - spin_unlock(&block_group->tree_lock); - cond_resched(); - spin_lock(&block_group->tree_lock); - } } - while ((node = rb_last(&block_group->free_space_offset)) != NULL) { - info = rb_entry(node, struct btrfs_free_space, offset_index); + while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { + info = rb_entry(node, struct btrfs_free_space, bytes_index); unlink_free_space(block_group, info); - if (info->bitmap) - kfree(info->bitmap); kfree(info); if (need_resched()) { spin_unlock(&block_group->tree_lock); @@ -856,7 +446,6 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) spin_lock(&block_group->tree_lock); } } - spin_unlock(&block_group->tree_lock); } @@ -864,35 +453,25 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size) { struct btrfs_free_space *entry = NULL; - u64 bytes_search = bytes + empty_size; u64 ret = 0; spin_lock(&block_group->tree_lock); - entry = find_free_space(block_group, &offset, &bytes_search, 0); + entry = tree_search_offset(&block_group->free_space_offset, offset, + bytes + empty_size, 1); if (!entry) - goto out; - - ret = offset; - if (entry->bitmap) { - bitmap_clear_bits(block_group, entry, offset, bytes); - if (!entry->bytes) { - unlink_free_space(block_group, entry); - kfree(entry->bitmap); - kfree(entry); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } - } else { + entry = tree_search_bytes(&block_group->free_space_bytes, + offset, bytes + empty_size); + if (entry) { unlink_free_space(block_group, entry); + ret = entry->offset; entry->offset += bytes; entry->bytes -= bytes; + if (!entry->bytes) kfree(entry); else 
link_free_space(block_group, entry); } - -out: spin_unlock(&block_group->tree_lock); return ret; @@ -938,47 +517,6 @@ int btrfs_return_cluster_to_free_space( return ret; } -static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_cluster *cluster, - u64 bytes, u64 min_start) -{ - struct btrfs_free_space *entry; - int err; - u64 search_start = cluster->window_start; - u64 search_bytes = bytes; - u64 ret = 0; - - spin_lock(&block_group->tree_lock); - spin_lock(&cluster->lock); - - if (!cluster->points_to_bitmap) - goto out; - - if (cluster->block_group != block_group) - goto out; - - entry = tree_search_offset(block_group, search_start, 0, 0); - - if (!entry || !entry->bitmap) - goto out; - - search_start = min_start; - search_bytes = bytes; - - err = search_bitmap(block_group, entry, &search_start, - &search_bytes); - if (err) - goto out; - - ret = search_start; - bitmap_clear_bits(block_group, entry, ret, bytes); -out: - spin_unlock(&cluster->lock); - spin_unlock(&block_group->tree_lock); - - return ret; -} - /* * given a cluster, try to allocate 'bytes' from it, returns 0 * if it couldn't find anything suitably large, or a logical disk offset @@ -992,10 +530,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, struct rb_node *node; u64 ret = 0; - if (cluster->points_to_bitmap) - return btrfs_alloc_from_bitmap(block_group, cluster, bytes, - min_start); - spin_lock(&cluster->lock); if (bytes > cluster->max_size) goto out; @@ -1033,73 +567,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, } out: spin_unlock(&cluster->lock); - return ret; } -static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *entry, - struct btrfs_free_cluster *cluster, - u64 offset, u64 bytes, u64 min_bytes) -{ - unsigned long next_zero; - unsigned long i; - unsigned long search_bits; - unsigned long total_bits; - unsigned long found_bits; - unsigned long start = 0; - unsigned long total_found = 0; - bool found = false; - - i = offset_to_bit(entry->offset, block_group->sectorsize, - max_t(u64, offset, entry->offset)); - search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); - total_bits = bytes_to_bits(bytes, block_group->sectorsize); - -again: - found_bits = 0; - for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i); - i < BITS_PER_BITMAP; - i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) { - next_zero = find_next_zero_bit(entry->bitmap, - BITS_PER_BITMAP, i); - if (next_zero - i >= search_bits) { - found_bits = next_zero - i; - break; - } - i = next_zero; - } - - if (!found_bits) - return -1; - - if (!found) { - start = i; - found = true; - } - - total_found += found_bits; - - if (cluster->max_size < found_bits * block_group->sectorsize) - cluster->max_size = found_bits * block_group->sectorsize; - - if (total_found < total_bits) { - i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero); - if (i - start > total_bits * 2) { - total_found = 0; - cluster->max_size = 0; - found = false; - } - goto again; - } - - cluster->window_start = start * block_group->sectorsize + - entry->offset; - cluster->points_to_bitmap = true; - - return 0; -} - /* * here we try to find a cluster of blocks in a block group. The goal * is to find at least bytes free and up to empty_size + bytes free. 
@@ -1117,12 +587,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 	struct btrfs_free_space *entry = NULL;
 	struct rb_node *node;
 	struct btrfs_free_space *next;
-	struct btrfs_free_space *last = NULL;
+	struct btrfs_free_space *last;
 	u64 min_bytes;
 	u64 window_start;
 	u64 window_free;
 	u64 max_extent = 0;
-	bool found_bitmap = false;
+	int total_retries = 0;
 	int ret;
 
 	/* for metadata, allow allocates with more holes */
@@ -1150,79 +620,30 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 again:
-	entry = tree_search_offset(block_group, offset, found_bitmap, 1);
+	min_bytes = min(min_bytes, bytes + empty_size);
+	entry = tree_search_bytes(&block_group->free_space_bytes,
+				  offset, min_bytes);
 	if (!entry) {
 		ret = -ENOSPC;
 		goto out;
 	}
-
-	/*
-	 * If found_bitmap is true, we exhausted our search for extent entries,
-	 * and we just want to search all of the bitmaps that we can find, and
-	 * ignore any extent entries we find.
-	 */
-	while (entry->bitmap || found_bitmap ||
-	       (!entry->bitmap && entry->bytes < min_bytes)) {
-		struct rb_node *node = rb_next(&entry->offset_index);
-
-		if (entry->bitmap && entry->bytes > bytes + empty_size) {
-			ret = btrfs_bitmap_cluster(block_group, entry, cluster,
-						   offset, bytes + empty_size,
-						   min_bytes);
-			if (!ret)
-				goto got_it;
-		}
-
-		if (!node) {
-			ret = -ENOSPC;
-			goto out;
-		}
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-	}
-
-	/*
-	 * We already searched all the extent entries from the passed in offset
-	 * to the end and didn't find enough space for the cluster, and we also
-	 * didn't find any bitmaps that met our criteria, just go ahead and exit
-	 */
-	if (found_bitmap) {
-		ret = -ENOSPC;
-		goto out;
-	}
-
-	cluster->points_to_bitmap = false;
 	window_start = entry->offset;
 	window_free = entry->bytes;
 	last = entry;
 	max_extent = entry->bytes;
 
-	while (1) {
+	while(1) {
 		/* out window is just right, lets fill it */
 		if (window_free >= bytes + empty_size)
 			break;
 
 		node = rb_next(&last->offset_index);
 		if (!node) {
-			if (found_bitmap)
-				goto again;
 			ret = -ENOSPC;
 			goto out;
 		}
 		next = rb_entry(node, struct btrfs_free_space, offset_index);
 
-		/*
-		 * we found a bitmap, so if this search doesn't result in a
-		 * cluster, we know to go and search again for the bitmaps and
-		 * start looking for space there
-		 */
-		if (next->bitmap) {
-			if (!found_bitmap)
-				offset = next->offset;
-			found_bitmap = true;
-			last = next;
-			continue;
-		}
-
 		/*
 		 * we haven't filled the empty size and the window is
 		 * very large. reset and try again
@@ -1234,6 +655,19 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 			window_free = entry->bytes;
 			last = entry;
 			max_extent = 0;
+			total_retries++;
+			if (total_retries % 64 == 0) {
+				if (min_bytes >= (bytes + empty_size)) {
+					ret = -ENOSPC;
+					goto out;
+				}
+				/*
+				 * grow our allocation a bit, we're not having
+				 * much luck
+				 */
+				min_bytes *= 2;
+				goto again;
+			}
 		} else {
 			last = next;
 			window_free += next->bytes;
@@ -1251,19 +685,11 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 	 * The cluster includes an rbtree, but only uses the offset index
 	 * of each free space cache entry.
 	 */
-	while (1) {
+	while(1) {
 		node = rb_next(&entry->offset_index);
-		if (entry->bitmap && node) {
-			entry = rb_entry(node, struct btrfs_free_space,
-					 offset_index);
-			continue;
-		} else if (entry->bitmap && !node) {
-			break;
-		}
-
-		rb_erase(&entry->offset_index, &block_group->free_space_offset);
+		unlink_free_space(block_group, entry);
 		ret = tree_insert_offset(&cluster->root, entry->offset,
-					 &entry->offset_index, 0);
+					 &entry->offset_index);
 		BUG_ON(ret);
 
 		if (!node || entry == last)
@@ -1271,10 +697,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 	}
-
-	cluster->max_size = max_extent;
-got_it:
 	ret = 0;
+	cluster->max_size = max_extent;
 	atomic_inc(&block_group->count);
 	list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
 	cluster->block_group = block_group;
@@ -1294,7 +718,6 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 	spin_lock_init(&cluster->refill_lock);
 	cluster->root.rb_node = NULL;
 	cluster->max_size = 0;
-	cluster->points_to_bitmap = false;
 	INIT_LIST_HEAD(&cluster->block_group_list);
 	cluster->block_group = NULL;
 }
diff --git a/trunk/fs/btrfs/free-space-cache.h b/trunk/fs/btrfs/free-space-cache.h
index 890a8e79011b..266fb8764054 100644
--- a/trunk/fs/btrfs/free-space-cache.h
+++ b/trunk/fs/btrfs/free-space-cache.h
@@ -19,14 +19,6 @@
 #ifndef __BTRFS_FREE_SPACE_CACHE
 #define __BTRFS_FREE_SPACE_CACHE
 
-struct btrfs_free_space {
-	struct rb_node offset_index;
-	u64 offset;
-	u64 bytes;
-	unsigned long *bitmap;
-	struct list_head list;
-};
-
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 			 u64 bytenr, u64 size);
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c
index 56fe83fa60c4..791eab19e330 100644
--- a/trunk/fs/btrfs/inode.c
+++ b/trunk/fs/btrfs/inode.c
@@ -2603,8 +2603,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	if (root->ref_cows)
 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
 	path = btrfs_alloc_path();
-	BUG_ON(!path);
 	path->reada = -1;
+	BUG_ON(!path);
 
 	/* FIXME, add redo link to tree so we don't leak on crash */
 	key.objectid = inode->i_ino;
diff --git a/trunk/fs/btrfs/print-tree.c b/trunk/fs/btrfs/print-tree.c
index 0d126be22b63..6d6523da0a30 100644
--- a/trunk/fs/btrfs/print-tree.c
+++ b/trunk/fs/btrfs/print-tree.c
@@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
 	}
 	printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
 	       (unsigned long long)btrfs_header_bytenr(c),
-	       level, nr,
+	       btrfs_header_level(c), nr,
 	       (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
 	for (i = 0; i < nr; i++) {
 		btrfs_node_key_to_cpu(c, &key, i);
@@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
 			       btrfs_level_size(root, level - 1),
 			       btrfs_node_ptr_generation(c, i));
 		if (btrfs_is_leaf(next) &&
-		    level != 1)
+		    btrfs_header_level(c) != 1)
 			BUG();
 		if (btrfs_header_level(next) !=
-		    level - 1)
+		    btrfs_header_level(c) - 1)
 			BUG();
 		btrfs_print_tree(root, next);
 		free_extent_buffer(next);
diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c
index e71264d1c2c9..008397934778 100644
--- a/trunk/fs/btrfs/relocation.c
+++ b/trunk/fs/btrfs/relocation.c
@@ -670,8 +670,6 @@ static struct backref_node *build_backref_tree(struct reloc_control *rc,
 			err = ret;
 			goto out;
 		}
-		if (ret > 0 && path2->slots[level] > 0)
-			path2->slots[level]--;
 
 		eb = path2->nodes[level];
 		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
@@ -1611,7 +1609,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 		BUG_ON(level == 0);
 		path->lowest_level = level;
 		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
-		path->lowest_level = 0;
 		if (ret < 0) {
 			btrfs_free_path(path);
 			return ret;
diff --git a/trunk/fs/btrfs/transaction.c b/trunk/fs/btrfs/transaction.c
index e51d2bc532f8..2dbf1c1f56ee 100644
--- a/trunk/fs/btrfs/transaction.c
+++ b/trunk/fs/btrfs/transaction.c
@@ -40,14 +40,6 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
 	}
 }
 
-static noinline void switch_commit_root(struct btrfs_root *root)
-{
-	down_write(&root->commit_root_sem);
-	free_extent_buffer(root->commit_root);
-	root->commit_root = btrfs_root_node(root);
-	up_write(&root->commit_root_sem);
-}
-
 /*
  * either allocate a new transaction or hop into the existing one
 */
@@ -452,6 +444,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 
 	btrfs_write_dirty_block_groups(trans, root);
 
+	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	BUG_ON(ret);
+
 	while (1) {
 		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
 		if (old_root_bytenr == root->node->start)
@@ -462,11 +457,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 				&root->root_key,
 				&root->root_item);
 		BUG_ON(ret);
+		btrfs_write_dirty_block_groups(trans, root);
 
-		ret = btrfs_write_dirty_block_groups(trans, root);
+		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 		BUG_ON(ret);
 	}
-	switch_commit_root(root);
+	free_extent_buffer(root->commit_root);
+	root->commit_root = btrfs_root_node(root);
 	return 0;
 }
@@ -498,6 +495,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 		root = list_entry(next, struct btrfs_root, dirty_list);
 
 		update_cowonly_root(trans, root);
+
+		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+		BUG_ON(ret);
 	}
 	return 0;
 }
@@ -544,7 +544,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
 			btrfs_update_reloc_root(trans, root);
 
 			if (root->commit_root != root->node) {
-				switch_commit_root(root);
+				free_extent_buffer(root->commit_root);
+				root->commit_root = btrfs_root_node(root);
 				btrfs_set_root_node(&root->root_item,
 						    root->node);
 			}
@@ -942,11 +943,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	mutex_unlock(&root->fs_info->trans_mutex);
 
-	if (flush_on_commit) {
-		btrfs_start_delalloc_inodes(root);
-		ret = btrfs_wait_ordered_extents(root, 0);
-		BUG_ON(ret);
-	} else if (snap_pending) {
+	if (flush_on_commit || snap_pending) {
+		if (flush_on_commit)
+			btrfs_start_delalloc_inodes(root);
 		ret = btrfs_wait_ordered_extents(root, 1);
 		BUG_ON(ret);
 	}
@@ -1010,11 +1009,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
 			    root->fs_info->tree_root->node);
-	switch_commit_root(root->fs_info->tree_root);
+	free_extent_buffer(root->fs_info->tree_root->commit_root);
+	root->fs_info->tree_root->commit_root =
+				btrfs_root_node(root->fs_info->tree_root);
 
 	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
 			    root->fs_info->chunk_root->node);
-	switch_commit_root(root->fs_info->chunk_root);
+	free_extent_buffer(root->fs_info->chunk_root->commit_root);
+	root->fs_info->chunk_root->commit_root =
+				btrfs_root_node(root->fs_info->chunk_root);
 
 	update_super_roots(root);
@@ -1054,7 +1057,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	cur_trans->commit_done = 1;
 	root->fs_info->last_trans_committed = cur_trans->transid;
-
 	wake_up(&cur_trans->commit_wait);
 
 	put_transaction(cur_trans);
diff --git a/trunk/fs/btrfs/tree-log.c b/trunk/fs/btrfs/tree-log.c
index d91b0de7c502..c13922206d1b 100644
--- a/trunk/fs/btrfs/tree-log.c
+++ b/trunk/fs/btrfs/tree-log.c
@@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
 		return -ENOENT;
 
 	inode = read_one_inode(root, key->objectid);
-	BUG_ON(!inode);
+	BUG_ON(!dir);
 
 	ref_ptr = btrfs_item_ptr_offset(eb, slot);
 	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c
index 5dbefd11b4af..3ab80e9cd767 100644
--- a/trunk/fs/btrfs/volumes.c
+++ b/trunk/fs/btrfs/volumes.c
@@ -721,8 +721,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
  */
 static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
 					 struct btrfs_device *device,
-					 u64 num_bytes, u64 *start,
-					 u64 *max_avail)
+					 u64 num_bytes, u64 *start)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = device->dev_root;
@@ -759,13 +758,9 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
 	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 	if (ret < 0)
 		goto error;
-	if (ret > 0) {
-		ret = btrfs_previous_item(root, path, key.objectid, key.type);
-		if (ret < 0)
-			goto error;
-		if (ret > 0)
-			start_found = 1;
-	}
+	ret = btrfs_previous_item(root, path, 0, key.type);
+	if (ret < 0)
+		goto error;
 	l = path->nodes[0];
 	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 	while (1) {
@@ -808,10 +803,6 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
 		if (last_byte < search_start)
 			last_byte = search_start;
 		hole_size = key.offset - last_byte;
-
-		if (hole_size > *max_avail)
-			*max_avail = hole_size;
-
 		if (key.offset > last_byte &&
 		    hole_size >= num_bytes) {
 			*start = last_byte;
@@ -1630,7 +1621,6 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
 	device->fs_devices->total_rw_bytes += diff;
 
 	device->total_bytes = new_size;
-	device->disk_total_bytes = new_size;
 	btrfs_clear_space_info_full(device->dev_root->fs_info);
 
 	return btrfs_update_device(trans, device);
@@ -2017,7 +2007,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 			goto done;
 		if (ret) {
 			ret = 0;
-			break;
+			goto done;
 		}
 
 		l = path->nodes[0];
@@ -2025,7 +2015,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
 		if (key.objectid != device->devid)
-			break;
+			goto done;
 
 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
 		length = btrfs_dev_extent_length(l, dev_extent);
@@ -2181,7 +2171,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		     max_chunk_size);
 
 again:
-	max_avail = 0;
 	if (!map || map->num_stripes != num_stripes) {
 		kfree(map);
 		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -2230,8 +2219,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
 		if (device->in_fs_metadata && avail >= min_free) {
 			ret = find_free_dev_extent(trans, device,
-						   min_free, &dev_offset,
-						   &max_avail);
+						   min_free, &dev_offset);
 			if (ret == 0) {
 				list_move_tail(&device->dev_alloc_list,
 					       &private_devs);
@@ -2807,6 +2795,26 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		}
 	}
 
+	for (i = 0; i < nr; i++) {
+		struct btrfs_multi_bio *multi;
+		struct btrfs_bio_stripe *stripe;
+		int ret;
+
+		length = 1;
+		ret = btrfs_map_block(map_tree, WRITE, buf[i],
+				      &length, &multi, 0);
+		BUG_ON(ret);
+
+		stripe = multi->stripes;
+		for (j = 0; j < multi->num_stripes; j++, stripe++) {
+			if (physical >= stripe->physical &&
+			    physical < stripe->physical + length)
+				break;
+		}
+		BUG_ON(j >= multi->num_stripes);
+		kfree(multi);
+	}
+
 	*logical = buf;
 	*naddrs = nr;
 	*stripe_len = map->stripe_len;
diff --git a/trunk/fs/ecryptfs/keystore.c b/trunk/fs/ecryptfs/keystore.c
index 259525c9abb8..af737bb56cb7 100644
--- a/trunk/fs/ecryptfs/keystore.c
+++ b/trunk/fs/ecryptfs/keystore.c
@@ -1303,13 +1303,6 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
 	}
 	(*new_auth_tok)->session_key.encrypted_key_size =
 		(body_size - (ECRYPTFS_SALT_SIZE + 5));
-	if ((*new_auth_tok)->session_key.encrypted_key_size
-	    > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
-		printk(KERN_WARNING "Tag 3 packet contains key larger "
-		       "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
-		rc = -EINVAL;
-		goto out_free;
-	}
 	if (unlikely(data[(*packet_size)++] != 0x04)) {
 		printk(KERN_WARNING "Unknown version number [%d]\n",
 		       data[(*packet_size) - 1]);
@@ -1456,12 +1449,6 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents,
 		rc = -EINVAL;
 		goto out;
 	}
-	if (unlikely((*tag_11_contents_size) > max_contents_bytes)) {
-		printk(KERN_ERR "Literal data section in tag 11 packet exceeds "
-		       "expected size\n");
-		rc = -EINVAL;
-		goto out;
-	}
 	if (data[(*packet_size)++] != 0x62) {
 		printk(KERN_WARNING "Unrecognizable packet\n");
 		rc = -EINVAL;
diff --git a/trunk/include/linux/tty.h b/trunk/include/linux/tty.h
index e8c6c9136c97..1488d8c81aac 100644
--- a/trunk/include/linux/tty.h
+++ b/trunk/include/linux/tty.h
@@ -394,7 +394,6 @@ extern void __do_SAK(struct tty_struct *tty);
 extern void disassociate_ctty(int priv);
 extern void no_tty(void);
 extern void tty_flip_buffer_push(struct tty_struct *tty);
-extern void tty_flush_to_ldisc(struct tty_struct *tty);
 extern void tty_buffer_free_all(struct tty_struct *tty);
 extern void tty_buffer_flush(struct tty_struct *tty);
 extern void tty_buffer_init(struct tty_struct *tty);