diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a6101f095db81bbcee04389d57984f0dfba1aa3b..f1727bb389a1d9470dd6b9fe5e59a9627b2f0087 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -985,6 +985,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev)
 		return 0;
 
+	dev->slot_id = slot_id;
+
 	/* Allocate the (output) device context that will be used in the HC. */
 	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 	if (!dev->out_ctx)
@@ -1003,6 +1005,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
 	/* Initialize the cancellation list and watchdog timers for each ep */
 	for (i = 0; i < 31; i++) {
+		dev->eps[i].ep_index = i;
+		dev->eps[i].vdev = dev;
 		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
 		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4c2376a4c1d6b2eb0a5e14b9f08275fd06430ea0..dbb1879e93cfb697117af1c4119adbbd6de7bf60 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -258,6 +258,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	    pdev->device == 0x9026)
 		xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
 
+	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+	    (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2 ||
+	     pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
+		xhci->quirks |= XHCI_NO_SOFT_RETRY;
+
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"QUIRK: Resetting on resume");
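The xhci-pci.c hunk above disables xHCI soft retry (a Reset Endpoint command issued with the TSP bit set) on AMD Promontory hosts, which are reported to misbehave when asked to soft-retry; the XHCI_NO_SOFT_RETRY quirk bit is consumed in process_bulk_intr_td() further down. A minimal stand-alone sketch of the same vendor/device matching pattern follows; the device IDs and bit position here are illustrative stand-ins, not authoritative values from pci_ids.h:

#include <stdint.h>
#include <stdio.h>

#define QUIRK_NO_SOFT_RETRY	(1ULL << 40)	/* mirrors XHCI_NO_SOFT_RETRY */

struct pci_quirk {
	uint16_t vendor;
	uint16_t device;
	uint64_t quirks;
};

/* hypothetical Promontory device IDs, for illustration only */
static const struct pci_quirk quirk_table[] = {
	{ 0x1022, 0x43bb, QUIRK_NO_SOFT_RETRY },
	{ 0x1022, 0x43b9, QUIRK_NO_SOFT_RETRY },
};

static uint64_t lookup_quirks(uint16_t vendor, uint16_t device)
{
	uint64_t q = 0;
	size_t i;

	for (i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++)
		if (quirk_table[i].vendor == vendor &&
		    quirk_table[i].device == device)
			q |= quirk_table[i].quirks;
	return q;
}

int main(void)
{
	printf("quirks: %#llx\n",
	       (unsigned long long)lookup_quirks(0x1022, 0x43bb));
	return 0;
}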
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 98b67605d3cf3a0b2d0343451358bd59be000d45..f81bf1e171c59f2af09fde10d9fb058fa0a5514f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -252,6 +252,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trace_xhci_inc_enq(ring);
 }
 
+static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
+			    struct xhci_segment *end_seg, union xhci_trb *end,
+			    unsigned int num_segs)
+{
+	union xhci_trb *last_on_seg;
+	int num = 0;
+	int i = 0;
+
+	do {
+		if (start_seg == end_seg && end >= start)
+			return num + (end - start);
+		last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
+		num += last_on_seg - start;
+		start_seg = start_seg->next;
+		start = start_seg->trbs;
+	} while (i++ <= num_segs);
+
+	return -EINVAL;
+}
+
 /*
  * Check to see if there's room to enqueue num_trbs on the ring and make sure
  * enqueue pointer will not advance into dequeue segment. See rules above.
@@ -399,7 +419,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 	 * stream once the endpoint is on the HW schedule.
 	 */
 	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
-	    (ep_state & EP_HALTED))
+	    (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
 		return;
 	writel(DB_VALUE(ep_index, stream_id), db_addr);
 	/* The CPU has better things to do at this point than wait for a
@@ -433,6 +453,33 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 	}
 }
 
+void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+					 unsigned int slot_id,
+					 unsigned int ep_index)
+{
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+}
+
+static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
+					     unsigned int slot_id,
+					     unsigned int ep_index)
+{
+	if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
+		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
+		return NULL;
+	}
+	if (ep_index >= EP_CTX_PER_DEV) {
+		xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
+		return NULL;
+	}
+	if (!xhci->devs[slot_id]) {
+		xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
+		return NULL;
+	}
+
+	return &xhci->devs[slot_id]->eps[ep_index];
+}
+
 /* Get the right ring for the given slot_id, ep_index and stream_id.
  * If the endpoint supports streams, boundary check the URB's stream ID.
  * If the endpoint doesn't support streams, return the singular endpoint ring.
@@ -443,7 +490,10 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 {
 	struct xhci_virt_ep *ep;
 
-	ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return NULL;
+
 	/* Common case: no streams */
 	if (!(ep->ep_state & EP_HAS_STREAMS))
 		return ep->ring;
@@ -534,6 +584,23 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 				stream_id);
 		return;
 	}
+	/*
+	 * A cancelled TD can complete with a stall if HW cached the trb.
+	 * In this case driver can't find cur_td, but if the ring is empty we
+	 * can move the dequeue pointer to the current enqueue position.
+	 */
+	if (!cur_td) {
+		if (list_empty(&ep_ring->td_list)) {
+			state->new_deq_seg = ep_ring->enq_seg;
+			state->new_deq_ptr = ep_ring->enqueue;
+			state->new_cycle_state = ep_ring->cycle_state;
+			goto done;
+		} else {
+			xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n");
+			return;
+		}
+	}
+
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 			"Finding endpoint context");
@@ -579,6 +646,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	state->new_deq_seg = new_seg;
 	state->new_deq_ptr = new_deq;
 
+done:
 	/* Don't update the ring cycle state for the producer (us). */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 			"Cycle state = 0x%x", state->new_cycle_state);
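xhci_get_virt_ep() above centralizes the sanity checks that the event handlers below now perform before dereferencing xhci->devs[slot_id]->eps[ep_index]: slot and endpoint numbers arrive in hardware-written event TRBs and cannot be trusted blindly. A stand-alone sketch of that guard pattern, with simplified stand-in types:

#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOTS	256	/* stand-in for MAX_HC_SLOTS */
#define EPS_PER_DEV	31	/* stand-in for EP_CTX_PER_DEV */

struct ep { int state; };
struct dev { struct ep eps[EPS_PER_DEV]; };

static struct dev *devs[MAX_SLOTS];

static struct ep *get_ep(unsigned int slot_id, unsigned int ep_index)
{
	if (slot_id == 0 || slot_id >= MAX_SLOTS)
		return NULL;	/* slot 0 is reserved, as in xHCI */
	if (ep_index >= EPS_PER_DEV)
		return NULL;
	if (!devs[slot_id])
		return NULL;	/* slot valid but no device allocated */
	return &devs[slot_id]->eps[ep_index];
}

int main(void)
{
	devs[1] = calloc(1, sizeof(*devs[1]));
	printf("valid: %p, invalid: %p\n",
	       (void *)get_ep(1, 0), (void *)get_ep(0, 0));
	free(devs[1]);
	return 0;
}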
@@ -713,11 +781,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 	memset(&deq_state, 0, sizeof(deq_state));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return;
+
 	vdev = xhci->devs[slot_id];
 	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
 	trace_xhci_handle_cmd_stop_ep(ep_ctx);
 
-	ep = &xhci->devs[slot_id]->eps[ep_index];
 	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
 			struct xhci_td, cancelled_td_list);
 
@@ -1038,9 +1109,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
-	dev = xhci->devs[slot_id];
-	ep = &dev->eps[ep_index];
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return;
 
+	dev = xhci->devs[slot_id];
 	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
 	if (!ep_ring) {
 		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
@@ -1113,9 +1186,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 	}
 
 cleanup:
-	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
-	dev->eps[ep_index].queued_deq_seg = NULL;
-	dev->eps[ep_index].queued_deq_ptr = NULL;
+	ep->ep_state &= ~SET_DEQ_PENDING;
+	ep->queued_deq_seg = NULL;
+	ep->queued_deq_ptr = NULL;
 	/* Restart any rings with pending URBs */
 	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
@@ -1124,10 +1197,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 		union xhci_trb *trb, u32 cmd_comp_code)
 {
 	struct xhci_virt_device *vdev;
+	struct xhci_virt_ep *ep;
 	struct xhci_ep_ctx *ep_ctx;
 	unsigned int ep_index;
 
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return;
+
 	vdev = xhci->devs[slot_id];
 	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
 	trace_xhci_handle_cmd_reset_ep(ep_ctx);
@@ -1157,8 +1235,12 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Clear our internal halted state */
-		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+		ep->ep_state &= ~EP_HALTED;
 	}
+
+	/* if this was a soft reset, then restart */
+	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
@@ -1790,19 +1872,35 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
 	return NULL;
 }
 
+static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
+				     struct xhci_virt_ep *ep)
+{
+	/*
+	 * As part of low/full-speed endpoint-halt processing
+	 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
+	 */
+	if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
+	    (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
+	    !(ep->ep_state & EP_CLEARING_TT)) {
+		ep->ep_state |= EP_CLEARING_TT;
+		td->urb->ep->hcpriv = td->urb->dev;
+		if (usb_hub_clear_tt_buffer(td->urb))
+			ep->ep_state &= ~EP_CLEARING_TT;
+	}
+}
+
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
-		unsigned int slot_id, unsigned int ep_index,
-		unsigned int stream_id, struct xhci_td *td,
-		enum xhci_ep_reset_type reset_type)
+		struct xhci_virt_ep *ep, unsigned int stream_id,
+		struct xhci_td *td,
+		enum xhci_ep_reset_type reset_type)
 {
-	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	struct xhci_command *command;
-
+	unsigned int slot_id = ep->vdev->slot_id;
 	/*
 	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
 	 * Device will be reset soon to recover the link so don't do anything
 	 */
-	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
+	if (ep->vdev->flags & VDEV_PORT_ERROR)
 		return;
 
@@ -1811,11 +1909,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 
 	ep->ep_state |= EP_HALTED;
 
-	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
+	xhci_queue_reset_ep(xhci, command, slot_id, ep->ep_index, reset_type);
 
 	if (reset_type == EP_HARD_RESET) {
 		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
-		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
+		xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id,
+					  td);
 	}
 	xhci_ring_cmd_db(xhci);
 }
@@ -1861,7 +1960,7 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
 }
 
 static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
-			   struct xhci_ring *ep_ring, int *status)
+			   struct xhci_ring *ep_ring, int status)
 {
 	struct urb *urb = NULL;
 
@@ -1880,7 +1979,7 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
 		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
 			  urb->transfer_buffer_length, urb->actual_length);
 		urb->actual_length = 0;
-		*status = 0;
+		status = 0;
 	}
 	list_del_init(&td->td_list);
 	/* Was this TD slated to be cancelled but completed anyway? */
@@ -1892,37 +1991,28 @@
 	if (last_td_in_urb(td)) {
 		if ((urb->actual_length != urb->transfer_buffer_length &&
 		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
-		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+		    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
 			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
 				 urb, urb->actual_length,
-				 urb->transfer_buffer_length, *status);
+				 urb->transfer_buffer_length, status);
 
 		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
-			*status = 0;
-		xhci_giveback_urb_in_irq(xhci, td, *status);
+			status = 0;
+		xhci_giveback_urb_in_irq(xhci, td, status);
 	}
 
 	return 0;
 }
 
-static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		     struct xhci_transfer_event *event,
-		     struct xhci_virt_ep *ep, int *status)
+static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		     struct xhci_ring *ep_ring, struct xhci_td *td,
+		     u32 trb_comp_code)
 {
-	struct xhci_virt_device *xdev;
 	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_ring *ep_ring;
-	unsigned int slot_id;
-	u32 trb_comp_code;
-	int ep_index;
+	int trbs_freed;
 
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
-	xdev = xhci->devs[slot_id];
-	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
-	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
 
 	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
 	    trb_comp_code == COMP_STOPPED ||
@@ -1936,21 +2026,35 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 
 	if (trb_comp_code == COMP_STALL_ERROR ||
 	    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
 						trb_comp_code)) {
-		/* Issue a reset endpoint command to clear the host side
-		 * halt, followed by a set dequeue command to move the
-		 * dequeue pointer past the TD.
-		 * The class driver clears the device side halt later.
+		/*
+		 * xhci internal endpoint state will go to a "halt" state for
+		 * any stall, including default control pipe protocol stall.
+		 * To clear the host side halt we need to issue a reset endpoint
+		 * command, followed by a set dequeue command to move past the
+		 * TD.
+		 * Class drivers clear the device side halt from a functional
+		 * stall later. Hub TT buffer should only be cleared for FS/LS
+		 * devices behind HS hubs for functional stalls.
 		 */
-		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
-					ep_ring->stream_id, td, EP_HARD_RESET);
+		if ((ep->ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
+			xhci_clear_hub_tt_buffer(xhci, td, ep);
+		xhci_cleanup_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+					     EP_HARD_RESET);
 	} else {
 		/* Update ring dequeue pointer */
-		while (ep_ring->dequeue != td->last_trb)
-			inc_deq(xhci, ep_ring);
+		trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
+					      td->last_trb_seg, td->last_trb,
+					      ep_ring->num_segs);
+		if (trbs_freed < 0)
+			xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
+		else
+			ep_ring->num_trbs_free += trbs_freed;
+		ep_ring->dequeue = td->last_trb;
+		ep_ring->deq_seg = td->last_trb_seg;
 		inc_deq(xhci, ep_ring);
 	}
 
-	return xhci_td_cleanup(xhci, td, ep_ring, status);
+	return xhci_td_cleanup(xhci, td, ep_ring, td->status);
 }
 
 /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
@@ -1971,23 +2076,17 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /*
  * Process control tds, update urb status and actual_length.
  */
-static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		union xhci_trb *ep_trb, struct xhci_transfer_event *event,
-		struct xhci_virt_ep *ep, int *status)
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		struct xhci_ring *ep_ring, struct xhci_td *td,
+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
-	struct xhci_virt_device *xdev;
-	unsigned int slot_id;
-	int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
 	u32 remaining, requested;
 	u32 trb_type;
 
 	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
-	xdev = xhci->devs[slot_id];
-	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
-	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	requested = td->urb->transfer_buffer_length;
 	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -1997,13 +2096,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		if (trb_type != TRB_STATUS) {
 			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
 				  (trb_type == TRB_DATA) ? "data" : "setup");
-			*status = -ESHUTDOWN;
+			td->status = -ESHUTDOWN;
 			break;
 		}
-		*status = 0;
+		td->status = 0;
 		break;
 	case COMP_SHORT_PACKET:
-		*status = 0;
+		td->status = 0;
 		break;
 	case COMP_STOPPED_SHORT_PACKET:
 		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
@@ -2035,7 +2134,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			ep_ctx, trb_comp_code))
 			break;
 		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
-			 trb_comp_code, ep_index);
+			 trb_comp_code, ep->ep_index);
 		/* else fall through */
 	case COMP_STALL_ERROR:
 		/* Did we transfer part of the data (middle) phase? */
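The finish_td() hunk above now jumps the dequeue pointer straight to the TD's last TRB and repairs num_trbs_free by counting the TRBs in between with xhci_num_trbs_to(), instead of stepping one TRB at a time. (Note the original patch text queued the num_trbs_free increment twice; only the conditional one is kept, and the hunk's new-line count is adjusted accordingly.) A compilable stand-alone model of that walk over a tiny two-segment ring, with the segment size shrunk for the demo:

#include <stdio.h>

#define TRBS_PER_SEG	8	/* real xHCI segments hold many more TRBs */

struct seg {
	int trbs[TRBS_PER_SEG];
	struct seg *next;
};

static int num_trbs_to(struct seg *start_seg, int *start,
		       struct seg *end_seg, int *end, unsigned int num_segs)
{
	int num = 0, i = 0;

	do {
		/* same-segment case: simple pointer difference */
		if (start_seg == end_seg && end >= start)
			return num + (int)(end - start);
		/* count to the end of this segment, then follow the link */
		num += (int)(&start_seg->trbs[TRBS_PER_SEG - 1] - start);
		start_seg = start_seg->next;
		start = start_seg->trbs;
	} while (i++ <= (int)num_segs);

	return -1;	/* end never found: the ring is corrupt */
}

int main(void)
{
	struct seg a, b;

	a.next = &b;
	b.next = &a;	/* two segments linked into a ring */
	/* from TRB 5 of segment a to TRB 3 of segment b: prints 5 */
	printf("%d\n", num_trbs_to(&a, &a.trbs[5], &b, &b.trbs[3], 2));
	return 0;
}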
@@ -2067,17 +2166,16 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		td->urb->actual_length = requested;
 
 finish_td:
-	return finish_td(xhci, td, event, ep, status);
+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 /*
  * Process isochronous tds, update urb packet status and actual_length.
  */
-static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		union xhci_trb *ep_trb, struct xhci_transfer_event *event,
-		struct xhci_virt_ep *ep, int *status)
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		struct xhci_ring *ep_ring, struct xhci_td *td,
+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
-	struct xhci_ring *ep_ring;
 	struct urb_priv *urb_priv;
 	int idx;
 	struct usb_iso_packet_descriptor *frame;
@@ -2086,7 +2184,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	u32 remaining, requested, ep_trb_len;
 	int short_framestatus;
 
-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	urb_priv = td->urb->hcpriv;
 	idx = urb_priv->num_tds_done;
@@ -2100,6 +2197,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	/* handle completion code */
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
+		/* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
+		if (td->error_mid_td)
+			break;
 		if (remaining) {
 			frame->status = short_framestatus;
 			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
@@ -2115,9 +2215,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	case COMP_BANDWIDTH_OVERRUN_ERROR:
 		frame->status = -ECOMM;
 		break;
-	case COMP_ISOCH_BUFFER_OVERRUN:
 	case COMP_BABBLE_DETECTED_ERROR:
+		sum_trbs_for_length = true;
+		fallthrough;
+	case COMP_ISOCH_BUFFER_OVERRUN:
 		frame->status = -EOVERFLOW;
+		if (ep_trb != td->last_trb)
+			td->error_mid_td = true;
 		break;
 	case COMP_INCOMPATIBLE_DEVICE_ERROR:
 	case COMP_STALL_ERROR:
@@ -2125,8 +2229,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		break;
 	case COMP_USB_TRANSACTION_ERROR:
 		frame->status = -EPROTO;
+		sum_trbs_for_length = true;
 		if (ep_trb != td->last_trb)
-			return 0;
+			td->error_mid_td = true;
 		break;
 	case COMP_STOPPED:
 		sum_trbs_for_length = true;
@@ -2146,27 +2251,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		break;
 	}
 
+	if (td->urb_length_set)
+		goto finish_td;
+
 	if (sum_trbs_for_length)
-		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
+		frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
 			ep_trb_len - remaining;
 	else
 		frame->actual_length = requested;
 
 	td->urb->actual_length += frame->actual_length;
 
-	return finish_td(xhci, td, event, ep, status);
+finish_td:
+	/* Don't give back TD yet if we encountered an error mid TD */
+	if (td->error_mid_td && ep_trb != td->last_trb) {
+		xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
+		td->urb_length_set = true;
+		return 0;
+	}
+
+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
-			struct xhci_transfer_event *event,
-			struct xhci_virt_ep *ep, int *status)
+			struct xhci_virt_ep *ep, int status)
 {
-	struct xhci_ring *ep_ring;
 	struct urb_priv *urb_priv;
 	struct usb_iso_packet_descriptor *frame;
 	int idx;
 
-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	urb_priv = td->urb->hcpriv;
 	idx = urb_priv->num_tds_done;
 	frame = &td->urb->iso_frame_desc[idx];
@@ -2178,25 +2291,26 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	frame->actual_length = 0;
 
 	/* Update ring dequeue pointer */
-	while (ep_ring->dequeue != td->last_trb)
-		inc_deq(xhci, ep_ring);
-	inc_deq(xhci, ep_ring);
+	ep->ring->dequeue = td->last_trb;
+	ep->ring->deq_seg = td->last_trb_seg;
+	ep->ring->num_trbs_free += td->num_trbs - 1;
+	inc_deq(xhci, ep->ring);
 
-	return xhci_td_cleanup(xhci, td, ep_ring, status);
+	return xhci_td_cleanup(xhci, td, ep->ring, status);
 }
 
 /*
  * Process bulk and interrupt tds, update urb status and actual_length.
  */
-static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		union xhci_trb *ep_trb, struct xhci_transfer_event *event,
-		struct xhci_virt_ep *ep, int *status)
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		struct xhci_ring *ep_ring, struct xhci_td *td,
+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
-	struct xhci_ring *ep_ring;
+	struct xhci_slot_ctx *slot_ctx;
 	u32 trb_comp_code;
 	u32 remaining, requested, ep_trb_len;
 
-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
@@ -2204,6 +2318,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
+		ep_ring->err_count = 0;
 		/* handle success with untransferred data as short packet */
 		if (ep_trb != td->last_trb || remaining) {
 			xhci_warn(xhci, "WARN Successful completion on short TX\n");
@@ -2211,13 +2326,13 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 				 td->urb->ep->desc.bEndpointAddress,
 				 requested, remaining);
 		}
-		*status = 0;
+		td->status = 0;
 		break;
 	case COMP_SHORT_PACKET:
 		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
 			 td->urb->ep->desc.bEndpointAddress,
 			 requested, remaining);
-		*status = 0;
+		td->status = 0;
 		break;
 	case COMP_STOPPED_SHORT_PACKET:
 		td->urb->actual_length = remaining;
@@ -2227,6 +2342,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		ep_trb_len = 0;
 		remaining = 0;
 		break;
+	case COMP_USB_TRANSACTION_ERROR:
+		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
+		    (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
+		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
+			break;
+
+		td->status = 0;
+		xhci_cleanup_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+					     EP_SOFT_RESET);
+		return 0;
 	default:
 		/* do nothing */
 		break;
@@ -2244,7 +2369,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			  remaining);
 		td->urb->actual_length = 0;
 	}
-	return finish_td(xhci, td, event, ep, status);
+
+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 /*
@@ -2276,14 +2402,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	ep_trb_dma = le64_to_cpu(event->buffer);
 
-	xdev = xhci->devs[slot_id];
-	if (!xdev) {
-		xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
-			 slot_id);
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep) {
+		xhci_err(xhci, "ERROR Invalid Transfer event\n");
 		goto err_out;
 	}
 
-	ep = &xdev->eps[ep_index];
+	xdev = xhci->devs[slot_id];
 	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
@@ -2301,8 +2426,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	case COMP_USB_TRANSACTION_ERROR:
 	case COMP_INVALID_STREAM_TYPE_ERROR:
 	case COMP_INVALID_STREAM_ID_ERROR:
-		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
-					     NULL, EP_SOFT_RESET);
+		xhci_cleanup_halted_endpoint(xhci, ep, 0, NULL,
+					     EP_SOFT_RESET);
 		goto cleanup;
 	case COMP_RING_UNDERRUN:
 	case COMP_RING_OVERRUN:
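The COMP_USB_TRANSACTION_ERROR branch added to process_bulk_intr_td() above attempts a soft retry before reporting -EPROTO to the class driver, unless the XHCI_NO_SOFT_RETRY quirk is set, MAX_SOFT_RETRY attempts are already spent, or the device sits behind a transaction translator (TT_SLOT in the slot context). A stand-alone sketch of that decision, with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SOFT_RETRY	3	/* mirrors the xhci.h define */

struct host { bool no_soft_retry; };		/* XHCI_NO_SOFT_RETRY quirk */
struct ring { unsigned int err_count; };	/* reset to 0 on COMP_SUCCESS */

/* Return true to issue a Reset Endpoint with TSP set (soft retry),
 * false to hand the error back to the class driver. */
static bool should_soft_retry(struct host *h, struct ring *r, bool behind_tt)
{
	if (h->no_soft_retry)
		return false;
	if (r->err_count++ > MAX_SOFT_RETRY)
		return false;
	if (behind_tt)		/* LS/FS device behind a high-speed hub */
		return false;
	return true;
}

int main(void)
{
	struct host h = { .no_soft_retry = false };
	struct ring r = { .err_count = 0 };
	int i;

	/* attempts 0-3 retry, 4 and later give up */
	for (i = 0; i < 6; i++)
		printf("attempt %d: %s\n", i,
		       should_soft_retry(&h, &r, false) ? "retry" : "give up");
	return 0;
}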
@@ -2478,6 +2603,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
 				 slot_id, ep_index);
 		}
+		if (trb_comp_code == COMP_STALL_ERROR ||
+		    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+						      trb_comp_code)) {
+			xhci_cleanup_halted_endpoint(xhci, ep,
+						     ep_ring->stream_id,
+						     NULL,
+						     EP_HARD_RESET);
+		}
 		goto cleanup;
 	}
 
@@ -2512,17 +2645,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		}
 
 		if (!ep_seg) {
-			if (!ep->skip ||
-			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
-				/* Some host controllers give a spurious
-				 * successful event after a short transfer.
-				 * Ignore it.
-				 */
-				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
-						ep_ring->last_td_was_short) {
-					ep_ring->last_td_was_short = false;
-					goto cleanup;
+
+			if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+				skip_isoc_td(xhci, td, ep, status);
+				goto cleanup;
+			}
+
+			/*
+			 * Some hosts give a spurious success event after a short
+			 * transfer. Ignore it.
+			 */
+			if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+			    ep_ring->last_td_was_short) {
+				ep_ring->last_td_was_short = false;
+				goto cleanup;
+			}
+
+			/*
+			 * xhci 4.10.2 states isoc endpoints should continue
+			 * processing the next TD if there was an error mid TD.
+			 * So hosts like NEC don't generate an event for the last
+			 * isoc TRB even if the IOC flag is set.
+			 * xhci 4.9.1 states that if there are errors in multi-TRB
+			 * TDs xHC should generate an error for that TRB, and if xHC
+			 * proceeds to the next TD it should generate an event for
+			 * any TRB with IOC flag on the way. Other hosts follow this.
+			 * So this event might be for the next TD.
+			 */
+			if (td->error_mid_td &&
+			    !list_is_last(&td->td_list, &ep_ring->td_list)) {
+				struct xhci_td *td_next = list_next_entry(td, td_list);
+
+				ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
+						   td_next->last_trb, ep_trb_dma, false);
+				if (ep_seg) {
+					/* give back previous TD, start handling new */
+					xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
+					ep_ring->dequeue = td->last_trb;
+					ep_ring->deq_seg = td->last_trb_seg;
+					inc_deq(xhci, ep_ring);
+					xhci_td_cleanup(xhci, td, ep_ring, td->status);
+					td = td_next;
 				}
+			}
+
+			if (!ep_seg) {
 				/* HC is busted, give up! */
 				xhci_err(xhci,
 					 "ERROR Transfer event TRB DMA ptr not "
@@ -2534,9 +2701,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					  ep_trb_dma, true);
 				return -ESHUTDOWN;
 			}
-
-			skip_isoc_td(xhci, td, event, ep, &status);
-			goto cleanup;
 		}
 		if (trb_comp_code == COMP_SHORT_PACKET)
 			ep_ring->last_td_was_short = true;
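The error_mid_td handling above copes with hosts that diverge after a mid-TD isoc error: some still generate an event for the TD's final TRB (per xHCI 4.9.1), while others, NEC among them, skip straight to the next TD (per 4.10.2). A toy model of the resulting give-back decision, reduced to bare flags; TRBs are plain indices here:

#include <stdbool.h>
#include <stdio.h>

struct td {
	int first, last;	/* TRB index range of this TD */
	bool error_mid_td;
};

/* Returns true when the TD should be given back to the class driver. */
static bool handle_event(struct td *td, int event_trb, bool error)
{
	if (event_trb < td->first || event_trb > td->last) {
		/* event already belongs to a later TD: flush the errored TD */
		return td->error_mid_td;
	}
	if (error && event_trb != td->last) {
		td->error_mid_td = true;	/* defer, wait for final event */
		return false;
	}
	return true;
}

int main(void)
{
	struct td a = { .first = 0, .last = 2 };
	struct td b = { .first = 0, .last = 2 };

	printf("%d\n", handle_event(&a, 1, true));	/* 0: defer */
	printf("%d\n", handle_event(&a, 2, false));	/* 1: final event came */
	printf("%d\n", handle_event(&b, 1, true));	/* 0: defer */
	printf("%d\n", handle_event(&b, 3, false));	/* 1: host skipped ahead */
	return 0;
}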
@@ -2563,25 +2727,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		 * endpoint. Otherwise, the endpoint remains stalled
 		 * indefinitely.
 		 */
+
 		if (trb_is_noop(ep_trb)) {
 			if (trb_comp_code == COMP_STALL_ERROR ||
 			    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
 							      trb_comp_code))
-				xhci_cleanup_halted_endpoint(xhci, slot_id,
-							     ep_index,
+				xhci_cleanup_halted_endpoint(xhci, ep,
 							     ep_ring->stream_id,
 							     td, EP_HARD_RESET);
 			goto cleanup;
 		}
 
+		td->status = status;
+
 		/* update the urb's actual_length and give back to the core */
 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
-			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
+			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
-			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
+			process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
 		else
-			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
-					     &status);
+			process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
 cleanup:
 		handling_skipped_tds = ep->skip &&
 			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
@@ -3278,6 +3443,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field |= TRB_IOC;
 			more_trbs_coming = false;
 			td->last_trb = ring->enqueue;
+			td->last_trb_seg = ring->enq_seg;
+			if (xhci_urb_suitable_for_idt(urb)) {
+				memcpy(&send_addr, urb->transfer_buffer,
+				       trb_buff_len);
+				le64_to_cpus(&send_addr);
+				field |= TRB_IDT;
+			}
 		}
 
 		/* Only set interrupt on short packet for IN endpoints */
@@ -3297,7 +3469,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				upper_32_bits(send_addr),
 				length_field,
 				field);
-
+		td->num_trbs++;
 		addr += trb_buff_len;
 		sent_len = trb_buff_len;
@@ -3321,8 +3493,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				       ep_index, urb->stream_id,
 				       1, urb, 1, mem_flags);
 		urb_priv->td[1].last_trb = ring->enqueue;
+		urb_priv->td[1].last_trb_seg = ring->enq_seg;
 		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
 		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
+		urb_priv->td[1].num_trbs++;
 	}
 
 	check_trb_math(urb, enqd_len);
@@ -3373,6 +3547,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	urb_priv = urb->hcpriv;
 	td = &urb_priv->td[0];
+	td->num_trbs = num_trbs;
 
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
@@ -3416,6 +3591,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		u32 length_field, remainder;
+		u64 addr;
+
+		if (xhci_urb_suitable_for_idt(urb)) {
+			memcpy(&addr, urb->transfer_buffer,
+			       urb->transfer_buffer_length);
+			le64_to_cpus(&addr);
+			field |= TRB_IDT;
+		} else {
+			addr = (u64) urb->transfer_dma;
+		}
 
 		remainder = xhci_td_remainder(xhci, 0,
 				urb->transfer_buffer_length,
@@ -3427,14 +3612,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
 		queue_trb(xhci, ep_ring, true,
-				lower_32_bits(urb->transfer_dma),
-				upper_32_bits(urb->transfer_dma),
+				lower_32_bits(addr),
+				upper_32_bits(addr),
 				length_field,
 				field | ep_ring->cycle_state);
 	}
 
 	/* Save the DMA address of the last TRB in the TD */
 	td->last_trb = ep_ring->enqueue;
+	td->last_trb_seg = ep_ring->enq_seg;
 
 	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
 	/* If the device sent data, the status stage is an OUT transfer */
@@ -3598,6 +3784,24 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
 	return start_frame;
 }
 
+/* Check if we should generate event interrupt for a TD in an isoc URB */
+static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
+{
+	if (xhci->hci_version < 0x100)
+		return false;
+	/* always generate an event interrupt for the last TD */
+	if (i == num_tds - 1)
+		return false;
+	/*
+	 * If AVOID_BEI is set the host handles full event rings poorly,
+	 * generate an event at least every 8th TD to clear the event ring
+	 */
+	if (i && xhci->quirks & XHCI_AVOID_BEI)
+		return !!(i % 8);
+
+	return true;
+}
+
 /* This is for isoc transfer */
 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3661,7 +3865,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			goto cleanup;
 		}
 		td = &urb_priv->td[i];
-
+		td->num_trbs = trbs_per_td;
 		/* use SIA as default, if frame id is used overwrite it */
 		sia_frame_id = TRB_SIA;
 		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
@@ -3704,11 +3908,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			} else {
 				more_trbs_coming = false;
 				td->last_trb = ep_ring->enqueue;
+				td->last_trb_seg = ep_ring->enq_seg;
 				field |= TRB_IOC;
-				/* set BEI, except for the last TD */
-				if (xhci->hci_version >= 0x100 &&
-				    !(xhci->quirks & XHCI_AVOID_BEI) &&
-				    i < num_tds - 1)
+				if (trb_block_event_intr(xhci, num_tds, i))
 					field |= TRB_BEI;
 			}
 			/* Calculate TRB length */
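trb_block_event_intr() above replaces the open-coded BEI test and, on XHCI_AVOID_BEI hosts, deliberately lets every 8th TD interrupt so the event ring keeps draining even though BEI would otherwise suppress all events but the last. A stand-alone copy of the policy plus a loop showing which TDs of a 20-TD isochronous URB would raise an interrupt:

#include <stdbool.h>
#include <stdio.h>

static bool block_event_intr(int hci_version, bool avoid_bei,
			     int num_tds, int i)
{
	if (hci_version < 0x100)
		return false;		/* pre-1.0 hosts have no BEI */
	if (i == num_tds - 1)
		return false;		/* always interrupt on the last TD */
	if (i && avoid_bei)
		return i % 8;		/* let every 8th TD drain the ring */
	return true;
}

int main(void)
{
	int num_tds = 20, i;

	/* on an XHCI_AVOID_BEI host this prints TDs 8, 16 and 19 */
	for (i = 0; i < num_tds; i++)
		if (!block_event_intr(0x100, true, num_tds, i))
			printf("TD %d interrupts\n", i);
	return 0;
}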
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fe042cf13bf8ef2cf6af1493ccfefebb7bf52bd0..5236017a621a9e6965d001fba1ab788fc416e4c5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1244,6 +1244,21 @@ EXPORT_SYMBOL_GPL(xhci_resume);
 
 /*-------------------------------------------------------------------------*/
 
+/*
+ * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT):
+ * we then copy the actual data into the TRB address register. This is
+ * limited to transfers up to 8 bytes on output endpoints of any kind with
+ * wMaxPacketSize >= 8 bytes. If suitable for IDT only one Transfer TRB per
+ * TD is allowed.
+ */
+static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+				gfp_t mem_flags)
+{
+	if (xhci_urb_suitable_for_idt(urb))
+		return 0;
+
+	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+}
+
 /**
  * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
  * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
@@ -2999,19 +3014,19 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 			added_ctxs, added_ctxs);
 }
 
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
-			       unsigned int stream_id, struct xhci_td *td)
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
+			       unsigned int ep_index, unsigned int stream_id,
+			       struct xhci_td *td)
 {
 	struct xhci_dequeue_state deq_state;
-	struct usb_device *udev = td->urb->dev;
 
 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 			"Cleaning up stalled endpoint ring");
 	/* We need to move the HW's dequeue pointer past this TD,
 	 * or it will attempt to resend it on the next doorbell ring.
 	 */
-	xhci_find_new_dequeue_state(xhci, udev->slot_id,
-			ep_index, stream_id, td, &deq_state);
+	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
+				    &deq_state);
 
 	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
 		return;
@@ -3022,7 +3037,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 				"Queueing new dequeue state");
-		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+		xhci_queue_new_dequeue_state(xhci, slot_id,
 				ep_index, &deq_state);
 	} else {
 		/* Better hope no one uses the input context between now and the
@@ -3033,7 +3048,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"Setting up input context for "
 				"configure endpoint command");
-		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
+		xhci_setup_input_ctx_for_quirk(xhci, slot_id,
 				ep_index, &deq_state);
 	}
 }
@@ -5233,6 +5248,26 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 }
 EXPORT_SYMBOL_GPL(xhci_gen_setup);
 
+static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
+					  struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	unsigned int slot_id;
+	unsigned int ep_index;
+	unsigned long flags;
+
+	xhci = hcd_to_xhci(hcd);
+	udev = (struct usb_device *)ep->hcpriv;
+	slot_id = udev->slot_id;
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
+	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
 static const struct hc_driver xhci_hc_driver = {
 	.description =		"xhci-hcd",
 	.product_desc =		"xHCI Host Controller",
@@ -5255,6 +5290,7 @@ static const struct hc_driver xhci_hc_driver = {
 	/*
 	 * managing i/o requests and associated device resources
 	 */
+	.map_urb_for_dma =	xhci_map_urb_for_dma,
 	.urb_enqueue =		xhci_urb_enqueue,
 	.urb_dequeue =		xhci_urb_dequeue,
 	.alloc_dev =		xhci_alloc_dev,
@@ -5293,6 +5329,7 @@ static const struct hc_driver xhci_hc_driver = {
 	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
 	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
 	.find_raw_port_number =	xhci_find_raw_port_number,
+	.clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
 };
 
 void xhci_init_driver(struct hc_driver *drv,
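The clear_tt_buffer_complete hook above closes the loop opened by xhci_clear_hub_tt_buffer() in xhci-ring.c: EP_CLEARING_TT is set while the Clear-TT-Buffer request to the hub is in flight, xhci_ring_ep_doorbell() suppresses doorbells for that endpoint in the meantime, and the hub driver's completion callback clears the flag and re-rings. A stand-alone model of that gating, with simplified types and illustrative bit values:

#include <stdio.h>

#define EP_HALTED	(1 << 1)
#define EP_CLEARING_TT	(1 << 8)

struct ep { unsigned int state; };

static void ring_doorbell(struct ep *ep)
{
	if (ep->state & (EP_HALTED | EP_CLEARING_TT)) {
		printf("doorbell suppressed (state %#x)\n", ep->state);
		return;
	}
	printf("doorbell rung\n");
}

static void clear_tt_start(struct ep *ep)
{
	ep->state |= EP_CLEARING_TT;	/* Clear-TT request queued to hub */
}

static void clear_tt_complete(struct ep *ep)
{
	ep->state &= ~EP_CLEARING_TT;	/* hub driver finished */
	ring_doorbell(ep);		/* restart the endpoint */
}

int main(void)
{
	struct ep ep = { 0 };

	clear_tt_start(&ep);
	ring_doorbell(&ep);	/* suppressed while the TT is being cleared */
	clear_tt_complete(&ep);	/* now the doorbell rings */
	return 0;
}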
an "input context" that @@ -1005,7 +1011,7 @@ struct xhci_virt_device { struct xhci_container_ctx *out_ctx; /* Used for addressing devices and configuration changes */ struct xhci_container_ctx *in_ctx; - struct xhci_virt_ep eps[31]; + struct xhci_virt_ep eps[EP_CTX_PER_DEV]; u8 fake_port; u8 real_port; struct xhci_interval_bw_table *bw_table; @@ -1312,6 +1318,8 @@ enum xhci_setup_dev { #define TRB_IOC (1<<5) /* The buffer pointer contains immediate data */ #define TRB_IDT (1<<6) +/* TDs smaller than this might use IDT */ +#define TRB_IDT_MAX_SIZE 8 /* Block Event Interrupt */ #define TRB_BEI (1<<9) @@ -1513,6 +1521,7 @@ static inline const char *xhci_trb_type_string(u8 type) /* How much data is left before the 64KB boundary? */ #define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \ (addr & (TRB_MAX_BUFF_SIZE - 1))) +#define MAX_SOFT_RETRY 3 struct xhci_segment { union xhci_trb *trbs; @@ -1529,13 +1538,17 @@ struct xhci_segment { struct xhci_td { struct list_head td_list; struct list_head cancelled_td_list; + int status; struct urb *urb; struct xhci_segment *start_seg; union xhci_trb *first_trb; union xhci_trb *last_trb; + struct xhci_segment *last_trb_seg; struct xhci_segment *bounce_seg; /* actual_length of the URB has already been set */ bool urb_length_set; + bool error_mid_td; + unsigned int num_trbs; }; /* xHCI command default timeout value */ @@ -1600,6 +1613,7 @@ struct xhci_ring { * if we own the TRB (if we are the consumer). See section 4.9.1. */ u32 cycle_state; + unsigned int err_count; unsigned int stream_id; unsigned int num_segs; unsigned int num_trbs_free; @@ -1874,6 +1888,7 @@ struct xhci_hcd { #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) #define XHCI_ZHAOXIN_HOST BIT_ULL(36) #define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(39) +#define XHCI_NO_SOFT_RETRY BIT_ULL(40) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -2121,13 +2136,17 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td); +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td); void xhci_stop_endpoint_command_watchdog(struct timer_list *t); void xhci_handle_command_timeout(struct work_struct *work); void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id); +void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, + unsigned int slot_id, + unsigned int ep_index); void xhci_cleanup_command_queue(struct xhci_hcd *xhci); void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring); unsigned int count_trbs(u64 addr, u64 len); @@ -2177,6 +2196,22 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, urb->stream_id); } +/* + * TODO: As per spec Isochronous IDT transmissions are supported. We bypass + * them anyways as we where unable to find a device that matches the + * constraints. 
 static inline char *xhci_slot_state_string(u32 state)
 {
 	switch (state) {
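xhci_urb_suitable_for_idt() above gates Immediate Data Transfer: for OUT transfers of at most TRB_IDT_MAX_SIZE (8) bytes the payload itself is memcpy'd into the TRB's 64-bit parameter instead of a DMA address (see the xhci_queue_ctrl_tx()/xhci_queue_bulk_tx() hunks), so no DMA mapping is needed at all. A stand-alone sketch of the packing step; the example payload is an arbitrary 8-byte control-style buffer, and the sketch assumes a little-endian host where the kernel's le64_to_cpus() would be a no-op:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TRB_IDT_MAX_SIZE	8

/* Pack a small OUT payload into the TRB parameter field.
 * Returns 1 on success, 0 if the buffer is too large for IDT. */
static int pack_idt(const void *buf, size_t len, uint64_t *param)
{
	uint64_t v = 0;

	if (len > TRB_IDT_MAX_SIZE)
		return 0;
	memcpy(&v, buf, len);	/* unused high bytes stay zero */
	*param = v;		/* kernel follows with le64_to_cpus() */
	return 1;
}

int main(void)
{
	uint8_t payload[8] = { 0x21, 0x09, 0x00, 0x02, 0x00, 0x00, 0x08, 0x00 };
	uint64_t param;

	if (pack_idt(payload, sizeof(payload), &param))
		printf("TRB parameter: %#018llx\n",
		       (unsigned long long)param);
	return 0;
}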