diff --git a/linux-5.10/rk3568_patch/kernel.patch b/linux-5.10/rk3568_patch/kernel.patch index 6fed67ba9f868c917d4671bd87c7f9574e3444c8..4b83f88636018566659e1230ca161f5df014da14 100644 --- a/linux-5.10/rk3568_patch/kernel.patch +++ b/linux-5.10/rk3568_patch/kernel.patch @@ -1591211,7 +1591211,7 @@ index 41d5a46c1..1aa5ed307 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c old mode 100644 new mode 100755 -index ed380ee58..4324fd31b +index a8a9addb4d25..10c5eae5c8f3 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -65,7 +65,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, @@ -1591425,7 +1591425,7 @@ index ed380ee58..4324fd31b /* * Change an endpoint's internal structure so it supports stream IDs. The * number of requested streams includes stream 0, which cannot be used by device -@@ -906,7 +961,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) +@@ -911,7 +966,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) for (i = 0; i < 31; i++) { if (dev->eps[i].ring) -@@ -1591434,25 +1591434,16 @@ index ed380ee58..4324fd31b if (dev->eps[i].stream_info) xhci_free_stream_info(xhci, dev->eps[i].stream_info); -@@ -994,6 +1049,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, - if (!dev) - return 0; +@@ -1005,6 +1060,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, + + dev->slot_id = slot_id; + + /* Allocate the (output) device context that will be used in the HC. */ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); if (!dev->out_ctx) -@@ -1012,6 +1069,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, - - /* Initialize the cancellation list and watchdog timers for each ep */ - for (i = 0; i < 31; i++) { -+ dev->eps[i].ep_index = i; -+ dev->eps[i].vdev = dev; - xhci_init_endpoint_timer(xhci, &dev->eps[i]); - INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); - INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); -@@ -1501,8 +1560,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, +@@ -1514,8 +1571,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, mult = 0; /* Set up the endpoint ring */ @@ -1591471,7 +1591462,7 @@ index ed380ee58..4324fd31b if (!virt_dev->eps[ep_index].new_ring) return -ENOMEM; -@@ -1769,6 +1836,7 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, +@@ -1782,6 +1847,7 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, INIT_LIST_HEAD(&command->cmd_list); return command; } @@ -1591479,7 +1591470,7 @@ index ed380ee58..4324fd31b struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, bool allocate_completion, gfp_t mem_flags) -@@ -1802,6 +1870,7 @@ void xhci_free_command(struct xhci_hcd *xhci, +@@ -1815,6 +1881,7 @@ void xhci_free_command(struct xhci_hcd *xhci, kfree(command->completion); kfree(command); } @@ -1591487,7 +1591478,7 @@ index ed380ee58..4324fd31b int xhci_alloc_erst(struct xhci_hcd *xhci, struct xhci_ring *evt_ring, -@@ -1832,6 +1901,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, +@@ -1845,6 +1912,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, return 0; } @@ -1591495,7 +1591486,7 @@ index ed380ee58..4324fd31b void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) { -@@ -1845,6 +1915,25 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) +@@ -1858,6 +1926,25 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) erst->erst_dma_addr); erst->entries = NULL; } + @@ -1591521,7 +1591512,7 @@ index
ed380ee58..4324fd31b void xhci_mem_cleanup(struct xhci_hcd *xhci) { -@@ -1900,9 +1989,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) +@@ -1913,9 +2000,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed medium stream array pool"); @@ -1591538,7 +1591529,7 @@ index ed380ee58..4324fd31b xhci->dcbaa = NULL; scratchpad_free(xhci); -@@ -1983,7 +2076,7 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci, +@@ -1996,7 +2087,7 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci, } /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */ @@ -1591547,7 +1591538,7 @@ index ed380ee58..4324fd31b { struct { dma_addr_t input_dma; -@@ -2103,6 +2196,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci) +@@ -2116,6 +2207,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci) xhci_dbg(xhci, "TRB math tests passed.\n"); return 0; } @@ -1591555,7 +1591546,7 @@ index ed380ee58..4324fd31b static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) { -@@ -2442,15 +2536,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) +@@ -2455,15 +2547,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * xHCI section 5.4.6 - doorbell array must be * "physically contiguous and 64-byte (cache line) aligned". */ @@ -1592291,7 +1592282,7 @@ index 561d0b7bc..e726a5723 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c old mode 100644 new mode 100755 -index 4512c4223..a937bc6db +index ead42fc3e16d..9c890d303ca4 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -57,7 +57,10 @@ @@ -1592401,7 +1592392,7 @@ index 4512c4223..a937bc6db static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay) { -@@ -414,9 +440,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, +@@ -421,9 +447,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id)); writel(DB_VALUE(ep_index, stream_id), db_addr); @@ -1592413,7 +1592404,7 @@ index 4512c4223..a937bc6db } /* Ring the doorbell for any rings with pending URBs */ -@@ -472,6 +497,26 @@ static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci, +@@ -479,6 +504,26 @@ static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci, return &xhci->devs[slot_id]->eps[ep_index]; } @@ -1592440,7 +1592431,7 @@ index 4512c4223..a937bc6db /* Get the right ring for the given slot_id, ep_index and stream_id. * If the endpoint supports streams, boundary check the URB's stream ID. * If the endpoint doesn't support streams, return the singular endpoint ring. 
-@@ -486,29 +531,7 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, +@@ -493,29 +538,7 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, if (!ep) return NULL; @@ -1592471,7 +1592462,7 @@ index 4512c4223..a937bc6db } -@@ -535,97 +558,55 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, +@@ -542,97 +565,55 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, return le64_to_cpu(ep_ctx->deq); } @@ -1592591,7 +1592582,7 @@ index 4512c4223..a937bc6db /* * We want to find the pointer, segment and cycle state of the new trb -@@ -640,40 +621,71 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, +@@ -647,40 +628,71 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, if (td_last_trb_found) break; } @@ -1592679,100 +1592670,60 @@ index 4512c4223..a937bc6db } /* flip_cycle means flip the cycle bit of all but the first and last TRB. -@@ -766,153 +778,326 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, +@@ -773,6 +785,35 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, seg->bounce_offs = 0; } --/* -- * When we get a command completion for a Stop Endpoint Command, we need to -- * unlink any cancelled TDs from the ring. There are two ways to do that: -- * -- * 1. If the HW was in the middle of processing the TD that needs to be -- * cancelled, then we must move the ring's dequeue pointer past the last TRB -- * in the TD with a Set Dequeue Pointer Command. -- * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain -- * bit cleared) so that the HW will skip over them. -- */ --static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, -- union xhci_trb *trb, struct xhci_event_cmd *event) -+static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, -+ struct xhci_ring *ep_ring, int status) ++static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci, ++ struct xhci_virt_ep *ep, unsigned int stream_id, ++ struct xhci_td *td, ++ enum xhci_ep_reset_type reset_type) ++{ ++ unsigned int slot_id = ep->vdev->slot_id; ++ int err; ++ ++ /* ++ * Avoid resetting endpoint if link is inactive. Can cause host hang. 
+ * Device will be reset soon to recover the link so don't do anything + */ + if (ep->vdev->flags & VDEV_PORT_ERROR) + return; + + ep->ep_state |= EP_HALTED; + + err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); + if (err) + return; + + if (reset_type == EP_HARD_RESET) { + ep->ep_state |= EP_HARD_CLEAR_TOGGLE; + xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id, + td); + } + xhci_ring_cmd_db(xhci); +} + static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ep_ring, int status) { @@ -795,8 +836,10 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, urb->actual_length = 0; status = 0; } - list_del_init(&td->td_list); - /* Was this TD slated to be cancelled but completed anyway?
*/ + /* TD might be removed from td_list if we are giving back a cancelled URB */ + if (!list_empty(&td->td_list)) + list_del_init(&td->td_list); + /* Giving back a cancelled URB, or if a slated TD completed anyway */ -+ if (!list_empty(&td->cancelled_td_list)) -+ list_del_init(&td->cancelled_td_list); + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); -- memset(&deq_state, 0, sizeof(deq_state)); -- ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); -+ inc_td_cnt(urb); -+ /* Giveback the urb when all the tds are completed */ -+ if (last_td_in_urb(td)) { -+ if ((urb->actual_length != urb->transfer_buffer_length && -+ (urb->transfer_flags & URB_SHORT_NOT_OK)) || -+ (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) -+ xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", -+ urb, urb->actual_length, -+ urb->transfer_buffer_length, status); - -- ep = xhci_get_virt_ep(xhci, slot_id, ep_index); -- if (!ep) -- return; -+ /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ -+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) -+ status = 0; -+ xhci_giveback_urb_in_irq(xhci, td, status); -+ } - -- vdev = xhci->devs[slot_id]; -- ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); -- trace_xhci_handle_cmd_stop_ep(ep_ctx); -+ return 0; -+} - -- last_unlinked_td = list_last_entry(&ep->cancelled_td_list, -- struct xhci_td, cancelled_td_list); +@@ -819,6 +862,26 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, + return 0; + } -- if (list_empty(&ep->cancelled_td_list)) { -- xhci_stop_watchdog_timer_in_irq(xhci, ep); -- ring_doorbell_for_active_rings(xhci, slot_id, ep_index); -- return; ++ +/* Complete the cancelled URBs we unlinked from td_list. */ +static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) +{ @@ -1592789,49 +1592740,29 @@ index 4512c4223..a937bc6db + + if (ep->xhci->xhc_state & XHCI_STATE_DYING) + return; - } -+} -+ -+static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, -+ unsigned int ep_index, enum xhci_ep_reset_type reset_type) -+{ -+ struct xhci_command *command; -+ int ret = 0; - -- /* Fix up the ep ring first, so HW stops executing cancelled TDs. -- * We have the xHCI lock, so nothing can modify this list until we drop -- * it. We're also in the event handler, so we can't get re-interrupted -- * if another Stop Endpoint command completes -+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC); -+ if (!command) { -+ ret = -ENOMEM; -+ goto done; + } -+ -+ ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); -+done: -+ if (ret) -+ xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n", -+ slot_id, ep_index, ret); -+ return ret; +} + + static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, enum xhci_ep_reset_type reset_type) + { +@@ -839,7 +902,7 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, + return ret; + } + +-static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci, +static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci, -+ struct xhci_virt_ep *ep, unsigned int stream_id, -+ struct xhci_td *td, -+ enum xhci_ep_reset_type reset_type) -+{ -+ unsigned int slot_id = ep->vdev->slot_id; -+ int err; -+ -+ /* -+ * Avoid resetting endpoint if link is inactive. Can cause host hang. 
-+ * Device will be reset soon to recover the link so don't do anything + struct xhci_virt_ep *ep, unsigned int stream_id, + struct xhci_td *td, + enum xhci_ep_reset_type reset_type) +@@ -852,20 +915,130 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci, + * Device will be reset soon to recover the link so don't do anything */ -- list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) { -+ if (ep->vdev->flags & VDEV_PORT_ERROR) + if (ep->vdev->flags & VDEV_PORT_ERROR) +- return; + return -ENODEV; -+ + +- ep->ep_state |= EP_HALTED; + /* add td to cancelled list and let reset ep handler take care of it */ + if (reset_type == EP_HARD_RESET) { + ep->ep_state |= EP_HARD_CLEAR_TOGGLE; @@ -1592845,14 +1592776,20 @@ index 4512c4223..a937bc6db + xhci_dbg(xhci, "Reset ep command already pending\n"); + return 0; + } -+ -+ err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); -+ if (err) + + err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); + if (err) +- return; + return err; + + ep->ep_state |= EP_HALTED; -+ -+ xhci_ring_cmd_db(xhci); + +- if (reset_type == EP_HARD_RESET) { +- ep->ep_state |= EP_HARD_CLEAR_TOGGLE; +- xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id, +- td); +- } + xhci_ring_cmd_db(xhci); + + return 0; +} @@ -1592880,28 +1592817,9 @@ index 4512c4223..a937bc6db + xhci = ep->xhci; + + list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Removing canceled TD starting at 0x%llx (dma).", - (unsigned long long)xhci_trb_virt_to_dma( -- cur_td->start_seg, cur_td->first_trb)); -- ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); -- if (!ep_ring) { -- /* This shouldn't happen unless a driver is mucking -- * with the stream ID after submission. This will -- * leave the TD on the hardware ring, and the hardware -- * will try to execute it, and may access a buffer -- * that has already been freed. In the best case, the -- * hardware will execute it, and the event handler will -- * ignore the completion event for that TD, since it was -- * removed from the td_list for that endpoint. In -- * short, don't muck with the stream ID after -- * submission. -- */ -- xhci_warn(xhci, "WARN Cancelled URB %p " -- "has invalid stream ID %u.\n", -- cur_td->urb, -- cur_td->urb->stream_id); -- goto remove_finished_td; ++ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, ++ "Removing canceled TD starting at 0x%llx (dma).", ++ (unsigned long long)xhci_trb_virt_to_dma( + td->start_seg, td->first_trb)); + list_del_init(&td->td_list); + ring = xhci_urb_to_transfer_ring(xhci, td->urb); @@ -1592909,25 +1592827,17 @@ index 4512c4223..a937bc6db + xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n", + td->urb, td->urb->stream_id); + continue; - } - /* -- * If we stopped on the TD we need to cancel, then we have to ++ } ++ /* + * If a ring stopped on the TD we need to cancel then we have to - * move the xHC endpoint ring dequeue pointer past this TD. ++ * move the xHC endpoint ring dequeue pointer past this TD. + * Rings halted due to STALL may show hw_deq is past the stalled + * TD, but still require a set TR Deq command to flush xHC cache. 
- */ -- hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index, -- cur_td->urb->stream_id); ++ */ + hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index, + td->urb->stream_id); - hw_deq &= ~0xf; - -- if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb, -- cur_td->last_trb, hw_deq, false)) { -- xhci_find_new_dequeue_state(xhci, slot_id, ep_index, -- cur_td->urb->stream_id, -- cur_td, &deq_state); ++ hw_deq &= ~0xf; ++ + if (td->cancel_status == TD_HALTED) { + cached_td = td; + } else if (trb_in_td(xhci, td->start_seg, td->first_trb, @@ -1592942,11 +1592852,10 @@ index 4512c4223..a937bc6db + cached_td = td; + break; + } - } else { -- td_to_noop(xhci, ep_ring, cur_td, false); ++ } else { + td_to_noop(xhci, ring, td, false); + td->cancel_status = TD_CLEARED; - } ++ } + } + if (cached_td) { + cached_td->cancel_status = TD_CLEARING_CACHE; @@ -1592963,14 +1592872,7 @@ index 4512c4223..a937bc6db + } + return 0; +} - --remove_finished_td: -- /* -- * The event handler won't see a completion for this TD anymore, -- * so remove it from the endpoint ring's TD list. Keep it in -- * the cancelled TD list for URB completion later. -- */ -- list_del_init(&cur_td->td_list); ++ +/* + * Returns the TD the endpoint ring halted on. + * Only call for non-running rings without streams. @@ -1592987,32 +1592889,120 @@ index 4512c4223..a937bc6db + if (trb_in_td(ep->xhci, td->start_seg, td->first_trb, + td->last_trb, hw_deq, false)) + return td; - } ++ } + return NULL; -+} + } -- xhci_stop_watchdog_timer_in_irq(xhci, ep); -+/* -+ * When we get a command completion for a Stop Endpoint Command, we need to -+ * unlink any cancelled TDs from the ring. There are two ways to do that: -+ * -+ * 1. If the HW was in the middle of processing the TD that needs to be -+ * cancelled, then we must move the ring's dequeue pointer past the last TRB -+ * in the TD with a Set Dequeue Pointer Command. -+ * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain -+ * bit cleared) so that the HW will skip over them. -+ */ -+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, + /* +@@ -879,142 +1052,86 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci, + * bit cleared) so that the HW will skip over them. 
+ */ + static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, +- union xhci_trb *trb, struct xhci_event_cmd *event) + union xhci_trb *trb, u32 comp_code) -+{ -+ unsigned int ep_index; -+ struct xhci_virt_ep *ep; -+ struct xhci_ep_ctx *ep_ctx; + { + unsigned int ep_index; +- struct xhci_ring *ep_ring; +- struct xhci_virt_ep *ep; +- struct xhci_td *cur_td = NULL; +- struct xhci_td *last_unlinked_td; +- struct xhci_ep_ctx *ep_ctx; +- struct xhci_virt_device *vdev; +- u64 hw_deq; +- struct xhci_dequeue_state deq_state; + struct xhci_td *td = NULL; + enum xhci_ep_reset_type reset_type; + struct xhci_command *command; + int err; + if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { + if (!xhci->devs[slot_id]) +- xhci_warn(xhci, "Stop endpoint command " +- "completion for disabled slot %u\n", +- slot_id); ++ xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", ++ slot_id); + return; + } + +- memset(&deq_state, 0, sizeof(deq_state)); + ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); +- + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); + if (!ep) + return; + +- vdev = xhci->devs[slot_id]; +- ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); +- trace_xhci_handle_cmd_stop_ep(ep_ctx); +- +- last_unlinked_td = list_last_entry(&ep->cancelled_td_list, +- struct xhci_td, cancelled_td_list); +- +- if (list_empty(&ep->cancelled_td_list)) { +- xhci_stop_watchdog_timer_in_irq(xhci, ep); +- ring_doorbell_for_active_rings(xhci, slot_id, ep_index); +- return; +- } ++ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); + +- /* Fix up the ep ring first, so HW stops executing cancelled TDs. +- * We have the xHCI lock, so nothing can modify this list until we drop +- * it. We're also in the event handler, so we can't get re-interrupted +- * if another Stop Endpoint command completes +- */ +- list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) { +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, +- "Removing canceled TD starting at 0x%llx (dma).", +- (unsigned long long)xhci_trb_virt_to_dma( +- cur_td->start_seg, cur_td->first_trb)); +- ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); +- if (!ep_ring) { +- /* This shouldn't happen unless a driver is mucking +- * with the stream ID after submission. This will +- * leave the TD on the hardware ring, and the hardware +- * will try to execute it, and may access a buffer +- * that has already been freed. In the best case, the +- * hardware will execute it, and the event handler will +- * ignore the completion event for that TD, since it was +- * removed from the td_list for that endpoint. In +- * short, don't muck with the stream ID after +- * submission. +- */ +- xhci_warn(xhci, "WARN Cancelled URB %p " +- "has invalid stream ID %u.\n", +- cur_td->urb, +- cur_td->urb->stream_id); +- goto remove_finished_td; +- } +- /* +- * If we stopped on the TD we need to cancel, then we have to +- * move the xHC endpoint ring dequeue pointer past this TD. 
+- */ +- hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index, +- cur_td->urb->stream_id); +- hw_deq &= ~0xf; +- +- if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb, +- cur_td->last_trb, hw_deq, false)) { +- xhci_find_new_dequeue_state(xhci, slot_id, ep_index, +- cur_td->urb->stream_id, +- cur_td, &deq_state); +- } else { +- td_to_noop(xhci, ep_ring, cur_td, false); +- } +- +-remove_finished_td: +- /* +- * The event handler won't see a completion for this TD anymore, +- * so remove it from the endpoint ring's TD list. Keep it in +- * the cancelled TD list for URB completion later. +- */ +- list_del_init(&cur_td->td_list); +- } +- +- xhci_stop_watchdog_timer_in_irq(xhci, ep); +- - /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ - if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { - xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, @@ -1593021,22 +1593011,9 @@ index 4512c4223..a937bc6db - } else { - /* Otherwise ring the doorbell(s) to restart queued transfers */ - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); -+ if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { -+ if (!xhci->devs[slot_id]) -+ xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", -+ slot_id); -+ return; - } - -+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); -+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index); -+ if (!ep) -+ return; -+ -+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); -+ +- } + trace_xhci_handle_cmd_stop_ep(ep_ctx); -+ + + if (comp_code == COMP_CONTEXT_STATE_ERROR) { /* - * Drop the lock and complete the URBs in the cancelled TD list. @@ -1593072,7 +1593049,7 @@ index 4512c4223..a937bc6db + td->status = -EPROTO; + } + /* reset ep, reset handler cleans up cancelled tds */ -+ err = xhci_handle_halted_endpoint(xhci, ep, 0, td, ++ err = (int *)xhci_handle_halted_endpoint(xhci, ep, 0, td, + reset_type); + if (err) + break; @@ -1593119,7 +1593096,7 @@ index 4512c4223..a937bc6db } static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) -@@ -1126,10 +1311,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, +@@ -1228,10 +1345,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index; unsigned int stream_id; struct xhci_ring *ep_ring; @@ -1593131,7 +1593108,7 @@ index 4512c4223..a937bc6db ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); -@@ -1137,8 +1322,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, +@@ -1239,8 +1356,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, if (!ep) return; @@ -1593141,7 +1593118,7 @@ index 4512c4223..a937bc6db if (!ep_ring) { xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", stream_id); -@@ -1146,8 +1330,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, +@@ -1248,8 +1364,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, goto cleanup; } @@ -1593152,7 +1593129,7 @@ index 4512c4223..a937bc6db trace_xhci_handle_cmd_set_deq(slot_ctx); trace_xhci_handle_cmd_set_deq_ep(ep_ctx); -@@ -1200,7 +1384,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, +@@ -1302,7 +1418,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, /* Update the ring's dequeue segment and dequeue pointer * to reflect the new position. 
*/ @@ -1593161,7 +1593138,7 @@ index 4512c4223..a937bc6db ep_ring, ep_index); } else { xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); -@@ -1208,7 +1392,15 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, +@@ -1310,7 +1426,15 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, ep->queued_deq_seg, ep->queued_deq_ptr); } } @@ -1593178,7 +1593155,7 @@ index 4512c4223..a937bc6db cleanup: ep->ep_state &= ~SET_DEQ_PENDING; ep->queued_deq_seg = NULL; -@@ -1220,7 +1412,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, +@@ -1322,7 +1446,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, union xhci_trb *trb, u32 cmd_comp_code) { @@ -1593186,7 +1593163,7 @@ index 4512c4223..a937bc6db struct xhci_virt_ep *ep; struct xhci_ep_ctx *ep_ctx; unsigned int ep_index; -@@ -1230,8 +1421,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, +@@ -1332,8 +1455,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, if (!ep) return; @@ -1593196,7 +1593173,7 @@ index 4512c4223..a937bc6db trace_xhci_handle_cmd_reset_ep(ep_ctx); /* This command will only fail if the endpoint wasn't halted, -@@ -1240,27 +1430,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, +@@ -1342,27 +1464,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Ignoring reset ep completion code of %u", cmd_comp_code); @@ -1593231,7 +1593208,7 @@ index 4512c4223..a937bc6db /* if this was a soft reset, then restart */ if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) -@@ -1295,7 +1473,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) +@@ -1396,7 +1506,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) } static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, @@ -1593240,7 +1593217,7 @@ index 4512c4223..a937bc6db { struct xhci_virt_device *virt_dev; struct xhci_input_control_ctx *ctrl_ctx; -@@ -1313,6 +1491,8 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, +@@ -1414,6 +1524,8 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, * is not waiting on the configure endpoint command. 
*/ virt_dev = xhci->devs[slot_id]; @@ -1593249,7 +1593226,7 @@ index 4512c4223..a937bc6db ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); if (!ctrl_ctx) { xhci_warn(xhci, "Could not get input context, bad type.\n"); -@@ -1357,24 +1537,27 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) +@@ -1458,24 +1570,27 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) struct xhci_slot_ctx *slot_ctx; vdev = xhci->devs[slot_id]; @@ -1593282,7 +1593259,7 @@ index 4512c4223..a937bc6db } static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, -@@ -1467,7 +1650,7 @@ void xhci_handle_command_timeout(struct work_struct *work) +@@ -1568,7 +1683,7 @@ void xhci_handle_command_timeout(struct work_struct *work) static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { @@ -1593291,7 +1593268,7 @@ index 4512c4223..a937bc6db u64 cmd_dma; dma_addr_t cmd_dequeue_dma; u32 cmd_comp_code; -@@ -1475,6 +1658,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, +@@ -1576,6 +1691,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_command *cmd; u32 cmd_type; @@ -1593303,7 +1593280,7 @@ index 4512c4223..a937bc6db cmd_dma = le64_to_cpu(event->cmd_trb); cmd_trb = xhci->cmd_ring->dequeue; -@@ -1535,8 +1723,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, +@@ -1636,8 +1756,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, break; case TRB_CONFIG_EP: if (!cmd->completion) @@ -1593313,7 +1593290,7 @@ index 4512c4223..a937bc6db break; case TRB_EVAL_CONTEXT: break; -@@ -1547,7 +1734,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, +@@ -1648,7 +1767,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); if (!cmd->completion) @@ -1593323,7 +1593300,7 @@ index 4512c4223..a937bc6db break; case TRB_SET_DEQ: WARN_ON(slot_id != TRB_TO_SLOT_ID( -@@ -1570,7 +1758,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, +@@ -1671,7 +1791,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, */ slot_id = TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3])); @@ -1593332,7 +1593309,7 @@ index 4512c4223..a937bc6db break; case TRB_NEC_GET_FW: xhci_handle_cmd_nec_get_fw(xhci, event); -@@ -1597,11 +1785,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, +@@ -1698,11 +1818,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, } static void handle_vendor_event(struct xhci_hcd *xhci, @@ -1593345,115 +1593322,23 @@ index 4512c4223..a937bc6db xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) handle_cmd_completion(xhci, &event->event_cmd); -@@ -1918,37 +2103,6 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, - } - } - --static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, -- unsigned int slot_id, unsigned int ep_index, -- unsigned int stream_id, struct xhci_td *td, -- enum xhci_ep_reset_type reset_type) --{ -- struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; -- struct xhci_command *command; -- -- /* -- * Avoid resetting endpoint if link is inactive. Can cause host hang. 
-- * Device will be reset soon to recover the link so don't do anything -- */ -- if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) -- return; -- -- command = xhci_alloc_command(xhci, false, GFP_ATOMIC); -- if (!command) -- return; -- -- ep->ep_state |= EP_HALTED; -- -- xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); -- -- if (reset_type == EP_HARD_RESET) { -- ep->ep_state |= EP_HARD_CLEAR_TOGGLE; -- xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id, -- td); -- } -- xhci_ring_cmd_db(xhci); --} -- - /* Check if an error has halted the endpoint ring. The class driver will - * cleanup the halt for a non-default control endpoint if we indicate a stall. - * However, a babble and other errors also halt the endpoint ring, and the class -@@ -1989,82 +2143,60 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) +@@ -2059,29 +2176,60 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) return 0; } --static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, -- struct xhci_ring *ep_ring, int *status) +-static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, +- struct xhci_transfer_event *event, struct xhci_virt_ep *ep) +static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + u32 trb_comp_code) { -- struct urb *urb = NULL; -- -- /* Clean up the endpoint's TD list */ -- urb = td->urb; -- -- /* if a bounce buffer was used to align this td then unmap it */ -- xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); -- -- /* Do one last check of the actual transfer length. -- * If the host controller said we transferred more data than the buffer -- * length, urb->actual_length will be a very big number (since it's -- * unsigned). Play it safe and say we didn't transfer anything. -- */ -- if (urb->actual_length > urb->transfer_buffer_length) { -- xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", -- urb->transfer_buffer_length, urb->actual_length); -- urb->actual_length = 0; -- *status = 0; -- } -- list_del_init(&td->td_list); -- /* Was this TD slated to be cancelled but completed anyway? 
*/ -- if (!list_empty(&td->cancelled_td_list)) -- list_del_init(&td->cancelled_td_list); -- -- inc_td_cnt(urb); -- /* Giveback the urb when all the tds are completed */ -- if (last_td_in_urb(td)) { -- if ((urb->actual_length != urb->transfer_buffer_length && -- (urb->transfer_flags & URB_SHORT_NOT_OK)) || -- (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) -- xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", -- urb, urb->actual_length, -- urb->transfer_buffer_length, *status); -- -- /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ -- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) -- *status = 0; -- xhci_giveback_urb_in_irq(xhci, td, *status); -- } -- -- return 0; --} -- --static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, -- struct xhci_transfer_event *event, -- struct xhci_virt_ep *ep, int *status) --{ -- struct xhci_virt_device *xdev; struct xhci_ep_ctx *ep_ctx; - struct xhci_ring *ep_ring; -- unsigned int slot_id; - u32 trb_comp_code; -- int ep_index; -- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); -- xdev = xhci->devs[slot_id]; -- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; - ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); -- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); - trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); -+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); - if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID || - trb_comp_code == COMP_STOPPED || @@ -1593513,22 +1593398,21 @@ index 4512c4223..a937bc6db /* * xhci internal endpoint state will go to a "halt" state for * any stall, including default control pipe protocol stall. -@@ -2075,18 +2207,24 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, +@@ -2092,18 +2240,23 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, * stall later. Hub TT buffer should only be cleared for FS/LS * devices behind HS hubs for functional stalls. */ -- if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) +- if ((ep->ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) + if (ep->ep_index != 0) xhci_clear_hub_tt_buffer(xhci, td, ep); -- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, -- ep_ring->stream_id, td, EP_HARD_RESET); + + xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, +- EP_HARD_RESET); - } else { - /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring); - inc_deq(xhci, ep_ring); -+ -+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, + EP_HARD_RESET); + + return 0; /* xhci_handle_halted_endpoint marked td cancelled */ @@ -1593536,76 +1593420,33 @@ index 4512c4223..a937bc6db + break; } -- return xhci_td_cleanup(xhci, td, ep_ring, status); + /* Update ring dequeue pointer */ + ep_ring->dequeue = td->last_trb; + ep_ring->deq_seg = td->last_trb_seg; + ep_ring->num_trbs_free += td->num_trbs - 1; + inc_deq(xhci, ep_ring); + -+ return xhci_td_cleanup(xhci, td, ep_ring, td->status); + return xhci_td_cleanup(xhci, td, ep_ring, td->status); } - /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ -@@ -2107,23 +2245,17 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, +@@ -2125,9 +2278,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, /* * Process control tds, update urb status and actual_length. 
*/ -static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *ep_trb, struct xhci_transfer_event *event, -- struct xhci_virt_ep *ep, int *status) +- struct xhci_virt_ep *ep) +static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) { -- struct xhci_virt_device *xdev; -- unsigned int slot_id; -- int ep_index; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; - u32 remaining, requested; - u32 trb_type; - - trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); -- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); -- xdev = xhci->devs[slot_id]; -- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; -- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); -+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); - trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); - requested = td->urb->transfer_buffer_length; - remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); -@@ -2133,13 +2265,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, - if (trb_type != TRB_STATUS) { - xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", - (trb_type == TRB_DATA) ? "data" : "setup"); -- *status = -ESHUTDOWN; -+ td->status = -ESHUTDOWN; - break; - } -- *status = 0; -+ td->status = 0; - break; - case COMP_SHORT_PACKET: -- *status = 0; -+ td->status = 0; - break; - case COMP_STOPPED_SHORT_PACKET: - if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) -@@ -2171,7 +2303,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, - ep_ctx, trb_comp_code)) - break; - xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", -- trb_comp_code, ep_index); -+ trb_comp_code, ep->ep_index); - fallthrough; - case COMP_STALL_ERROR: - /* Did we transfer part of the data (middle) phase? 
*/ -@@ -2203,17 +2335,16 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, +@@ -2215,15 +2368,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->actual_length = requested; finish_td: -- return finish_td(xhci, td, event, ep, status); +- return finish_td(xhci, td, event, ep); + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); } @@ -1593614,75 +1593455,39 @@ index 4512c4223..a937bc6db */ -static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *ep_trb, struct xhci_transfer_event *event, -- struct xhci_virt_ep *ep, int *status) +- struct xhci_virt_ep *ep) +static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) { -- struct xhci_ring *ep_ring; struct urb_priv *urb_priv; int idx; - struct usb_iso_packet_descriptor *frame; -@@ -2222,7 +2353,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - u32 remaining, requested, ep_trb_len; - int short_framestatus; - -- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); - trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); - urb_priv = td->urb->hcpriv; - idx = urb_priv->num_tds_done; -@@ -2283,26 +2413,23 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - } - - if (sum_trbs_for_length) -- frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) + -+ frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + - ep_trb_len - remaining; - else - frame->actual_length = requested; +@@ -2300,7 +2453,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->actual_length += frame->actual_length; -- return finish_td(xhci, td, event, ep, status); +- return finish_td(xhci, td, event, ep); + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); } static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, -- struct xhci_transfer_event *event, -- struct xhci_virt_ep *ep, int *status) -+ struct xhci_virt_ep *ep, int status) - { -- struct xhci_ring *ep_ring; - struct urb_priv *urb_priv; - struct usb_iso_packet_descriptor *frame; - int idx; - -- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); - urb_priv = td->urb->hcpriv; - idx = urb_priv->num_tds_done; - frame = &td->urb->iso_frame_desc[idx]; -@@ -2314,31 +2441,26 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - frame->actual_length = 0; - +@@ -2323,6 +2476,9 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, /* Update ring dequeue pointer */ -- while (ep_ring->dequeue != td->last_trb) -- inc_deq(xhci, ep_ring); -- inc_deq(xhci, ep_ring); + while (ep->ring->dequeue != td->last_trb) + inc_deq(xhci, ep->ring); + ep->ring->dequeue = td->last_trb; + ep->ring->deq_seg = td->last_trb_seg; + ep->ring->num_trbs_free += td->num_trbs - 1; -+ inc_deq(xhci, ep->ring); - -- return xhci_td_cleanup(xhci, td, ep_ring, status); -+ return xhci_td_cleanup(xhci, td, ep->ring, status); - } + inc_deq(xhci, ep->ring); + return xhci_td_cleanup(xhci, td, ep->ring, status); +@@ -2331,17 +2487,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, /* * Process bulk and interrupt tds, update urb status and actual_length. 
*/ -static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *ep_trb, struct xhci_transfer_event *event, -- struct xhci_virt_ep *ep, int *status) +- struct xhci_virt_ep *ep) +static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) @@ -1593691,59 +1593496,22 @@ index 4512c4223..a937bc6db - struct xhci_ring *ep_ring; u32 trb_comp_code; u32 remaining, requested, ep_trb_len; -- unsigned int slot_id; -- int ep_index; -- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); -- slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx); -- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); - ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); -+ slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); -@@ -2354,13 +2476,13 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, - td->urb->ep->desc.bEndpointAddress, - requested, remaining); - } -- *status = 0; -+ td->status = 0; - break; - case COMP_SHORT_PACKET: - xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", - td->urb->ep->desc.bEndpointAddress, - requested, remaining); -- *status = 0; -+ td->status = 0; - break; - case COMP_STOPPED_SHORT_PACKET: - td->urb->actual_length = remaining; -@@ -2375,9 +2497,11 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, - (ep_ring->err_count++ > MAX_SOFT_RETRY) || - le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) - break; -- *status = 0; -- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, -- ep_ring->stream_id, td, EP_SOFT_RESET); -+ -+ td->status = 0; -+ -+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, -+ EP_SOFT_RESET); - return 0; - default: - /* do nothing */ -@@ -2396,7 +2520,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, +@@ -2401,7 +2555,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, remaining); td->urb->actual_length = 0; } -- return finish_td(xhci, td, event, ep, status); -+ +- return finish_td(xhci, td, event, ep); + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); } /* -@@ -2407,7 +2532,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, +@@ -2412,7 +2566,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, static int handle_tx_event(struct xhci_hcd *xhci, struct xhci_transfer_event *event) { @@ -1593751,7 +1593519,7 @@ index 4512c4223..a937bc6db struct xhci_virt_ep *ep; struct xhci_ring *ep_ring; unsigned int slot_id; -@@ -2434,9 +2558,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, +@@ -2439,9 +2592,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, goto err_out; } @@ -1593762,18 +1593530,7 @@ index 4512c4223..a937bc6db if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { xhci_err(xhci, -@@ -2452,8 +2575,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, - case COMP_USB_TRANSACTION_ERROR: - case COMP_INVALID_STREAM_TYPE_ERROR: - case COMP_INVALID_STREAM_ID_ERROR: -- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0, -- NULL, EP_SOFT_RESET); -+ xhci_handle_halted_endpoint(xhci, ep, 0, NULL, -+ EP_SOFT_RESET); - goto cleanup; - case 
COMP_RING_UNDERRUN: - case COMP_RING_OVERRUN: -@@ -2508,7 +2631,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, +@@ -2519,7 +2671,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_STALL_ERROR: xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, ep_index); @@ -1593781,67 +1593538,22 @@ index 4512c4223..a937bc6db status = -EPIPE; break; case COMP_SPLIT_TRANSACTION_ERROR: -@@ -2636,11 +2758,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, - if (trb_comp_code == COMP_STALL_ERROR || - xhci_requires_manual_halt_cleanup(xhci, ep_ctx, - trb_comp_code)) { -- xhci_cleanup_halted_endpoint(xhci, slot_id, -- ep_index, -- ep_ring->stream_id, -- NULL, -- EP_HARD_RESET); -+ xhci_handle_halted_endpoint(xhci, ep, -+ ep_ring->stream_id, -+ NULL, -+ EP_HARD_RESET); - } - goto cleanup; - } -@@ -2699,7 +2820,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, - return -ESHUTDOWN; - } - -- skip_isoc_td(xhci, td, event, ep, &status); -+ skip_isoc_td(xhci, td, ep, status); - goto cleanup; - } - if (trb_comp_code == COMP_SHORT_PACKET) -@@ -2727,25 +2848,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, - * endpoint. Otherwise, the endpoint remains stalled - * indefinitely. - */ -+ - if (trb_is_noop(ep_trb)) { - if (trb_comp_code == COMP_STALL_ERROR || - xhci_requires_manual_halt_cleanup(xhci, ep_ctx, - trb_comp_code)) -- xhci_cleanup_halted_endpoint(xhci, slot_id, -- ep_index, -- ep_ring->stream_id, -- td, EP_HARD_RESET); -+ xhci_handle_halted_endpoint(xhci, ep, -+ ep_ring->stream_id, -+ td, EP_HARD_RESET); - goto cleanup; - } +@@ -2752,11 +2903,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, -+ td->status = status; -+ /* update the urb's actual_length and give back to the core */ if (usb_endpoint_xfer_control(&td->urb->ep->desc)) -- process_ctrl_td(xhci, td, ep_trb, event, ep, &status); +- process_ctrl_td(xhci, td, ep_trb, event, ep); + process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) -- process_isoc_td(xhci, td, ep_trb, event, ep, &status); +- process_isoc_td(xhci, td, ep_trb, event, ep); + process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); else -- process_bulk_intr_td(xhci, td, ep_trb, event, ep, -- &status); +- process_bulk_intr_td(xhci, td, ep_trb, event, ep); + process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); cleanup: handling_skipped_tds = ep->skip && trb_comp_code != COMP_MISSED_SERVICE_ERROR && -@@ -2786,10 +2908,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, +@@ -2797,10 +2948,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, * Returns >0 for "possibly more events to process" (caller should call again), * otherwise 0 if done. In future, <0 returns should indicate error code. */ @@ -1593854,7 +1593566,7 @@ index 4512c4223..a937bc6db int ret; /* Event ring hasn't been allocated yet. */ -@@ -2811,31 +2934,30 @@ static int xhci_handle_event(struct xhci_hcd *xhci) +@@ -2822,31 +2974,30 @@ static int xhci_handle_event(struct xhci_hcd *xhci) * speculative reads of the event's flags/data below. */ rmb(); @@ -1593896,7 +1593608,7 @@ index 4512c4223..a937bc6db } /* Any of the above functions may drop and re-acquire the lock, so check * to make sure a watchdog timer didn't mark the host as non-responsive. 
-@@ -2855,13 +2977,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci) +@@ -2866,13 +3017,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci) */ return 1; } @@ -1593912,7 +1593624,7 @@ index 4512c4223..a937bc6db union xhci_trb *event_ring_deq) { u64 temp_64; -@@ -2891,6 +3014,16 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, +@@ -2902,6 +3054,16 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, temp_64 |= ERST_EHB; xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); } @@ -1593929,7 +1593641,7 @@ index 4512c4223..a937bc6db /* * xHCI spec says we can get an interrupt, and if the HC has an error condition, -@@ -2926,6 +3059,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) +@@ -2937,6 +3099,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) goto out; } @@ -1593940,7 +1593652,7 @@ index 4512c4223..a937bc6db /* * Clear the op reg interrupt status first, * so we can receive interrupts from other MSI-X interrupters. -@@ -3016,6 +3153,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, +@@ -3029,6 +3195,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) { unsigned int num_trbs_needed; @@ -1593948,7 +1593660,7 @@ index 4512c4223..a937bc6db /* Make sure the endpoint has been added to xHC schedule */ switch (ep_state) { -@@ -3086,7 +3224,19 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, +@@ -3099,7 +3266,19 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, ep_ring->enq_seg = ep_ring->enq_seg->next; ep_ring->enqueue = ep_ring->enq_seg->trbs; @@ -1593968,7 +1593680,7 @@ index 4512c4223..a937bc6db return 0; } -@@ -3105,7 +3255,8 @@ static int prepare_transfer(struct xhci_hcd *xhci, +@@ -3118,7 +3297,8 @@ static int prepare_transfer(struct xhci_hcd *xhci, struct xhci_ring *ep_ring; struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); @@ -1593978,7 +1593690,7 @@ index 4512c4223..a937bc6db if (!ep_ring) { xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", stream_id); -@@ -3474,7 +3625,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3487,7 +3667,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field |= TRB_IOC; more_trbs_coming = false; td->last_trb = ring->enqueue; @@ -1593987,7 +1593699,7 @@ index 4512c4223..a937bc6db if (xhci_urb_suitable_for_idt(urb)) { memcpy(&send_addr, urb->transfer_buffer, trb_buff_len); -@@ -3500,7 +3651,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3513,7 +3693,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, upper_32_bits(send_addr), length_field, field); @@ -1593996,7 +1593708,7 @@ index 4512c4223..a937bc6db addr += trb_buff_len; sent_len = trb_buff_len; -@@ -3524,8 +3675,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3537,8 +3717,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ep_index, urb->stream_id, 1, urb, 1, mem_flags); urb_priv->td[1].last_trb = ring->enqueue; @@ -1594007,7 +1593719,7 @@ index 4512c4223..a937bc6db } check_trb_math(urb, enqd_len); -@@ -3576,6 +3729,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3589,6 +3771,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, urb_priv = urb->hcpriv; td = &urb_priv->td[0]; @@ -1594015,7 +1593727,7 @@ index 4512c4223..a937bc6db /* * Don't give the first TRB to the hardware (by toggling the cycle bit) -@@ -3648,6 
+3802,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3661,6 +3844,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Save the DMA address of the last TRB in the TD */ td->last_trb = ep_ring->enqueue; @@ -1594023,7 +1593735,7 @@ index 4512c4223..a937bc6db /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ /* If the device sent data, the status stage is an OUT transfer */ -@@ -3892,7 +4047,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3905,7 +4089,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, goto cleanup; } td = &urb_priv->td[i]; @@ -1594032,7 +1593744,7 @@ index 4512c4223..a937bc6db /* use SIA as default, if frame id is used overwrite it */ sia_frame_id = TRB_SIA; if (!(urb->transfer_flags & URB_ISO_ASAP) && -@@ -3935,6 +4090,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3948,6 +4132,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } else { more_trbs_coming = false; td->last_trb = ep_ring->enqueue; @@ -1594040,7 +1593752,7 @@ index 4512c4223..a937bc6db field |= TRB_IOC; if (trb_block_event_intr(xhci, num_tds, i)) field |= TRB_BEI; -@@ -4217,71 +4373,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, +@@ -4230,71 +4415,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, return queue_command(xhci, cmd, 0, 0, 0, trb_slot_id | trb_ep_index | type | trb_suspend, false); } @@ -1594555,29 +1594267,10 @@ index bf42ba3e4..9b4ecda4b } EXPORT_SYMBOL_GPL(xhci_init_driver); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h -old mode 100644 -new mode 100755 -index 45584a278..b30e76ca0 +index ac09b171b783..e84fbe2b7c20 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h -@@ -921,6 +921,8 @@ struct xhci_bw_info { - #define SS_BW_RESERVED 10 - - struct xhci_virt_ep { -+ struct xhci_virt_device *vdev; /* parent */ -+ unsigned int ep_index; - struct xhci_ring *ring; - /* Related to endpoints that are configured to use stream IDs only */ - struct xhci_stream_info *stream_info; -@@ -999,6 +1001,7 @@ struct xhci_interval_bw_table { - #define EP_CTX_PER_DEV 31 - - struct xhci_virt_device { -+ int slot_id; - struct usb_device *udev; - /* - * Commands to the hardware are passed an "input context" that -@@ -1415,7 +1418,7 @@ union xhci_trb { +@@ -1422,7 +1422,7 @@ union xhci_trb { /* MFINDEX Wrap Event - microframe counter wrapped */ #define TRB_MFINDEX_WRAP 39 /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */ @@ -1594586,7 +1594279,7 @@ index 45584a278..b30e76ca0 /* Nec vendor-specific command completion event. */ #define TRB_NEC_CMD_COMP 48 /* Get NEC firmware revision. 
*/ -@@ -1535,16 +1538,27 @@ struct xhci_segment { +@@ -1542,17 +1542,27 @@ struct xhci_segment { unsigned int bounce_len; }; @@ -1594600,7 +1594293,7 @@ index 45584a278..b30e76ca0 struct xhci_td { struct list_head td_list; struct list_head cancelled_td_list; -+ int status; + int status; + enum xhci_cancelled_td_status cancel_status; struct urb *urb; struct xhci_segment *start_seg; @@ -1594614,7 +1594307,7 @@ index 45584a278..b30e76ca0 }; /* xHCI command default timeout value */ -@@ -1556,13 +1570,6 @@ struct xhci_cd { +@@ -1564,13 +1574,6 @@ struct xhci_cd { union xhci_trb *cmd_trb; }; @@ -1594628,7 +1594321,7 @@ index 45584a278..b30e76ca0 enum xhci_ring_type { TYPE_CTRL = 0, TYPE_ISOC, -@@ -1913,6 +1920,7 @@ struct xhci_hcd { +@@ -1922,6 +1925,7 @@ struct xhci_hcd { struct list_head regset_list; void *dbc; @@ -1594636,7 +1594329,7 @@ index 45584a278..b30e76ca0 /* platform-specific data -- must come last */ unsigned long priv[] __aligned(sizeof(s64)); }; -@@ -1922,8 +1930,15 @@ struct xhci_driver_overrides { +@@ -1931,8 +1935,15 @@ struct xhci_driver_overrides { size_t extra_priv_size; int (*reset)(struct usb_hcd *hcd); int (*start)(struct usb_hcd *hcd); @@ -1594652,7 +1594345,7 @@ index 45584a278..b30e76ca0 }; #define XHCI_CFC_DELAY 10 -@@ -2050,10 +2065,6 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, +@@ -2059,10 +2070,6 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, struct xhci_ring *xhci_dma_to_transfer_ring( struct xhci_virt_ep *ep, u64 address); @@ -1594663,7 +1594356,7 @@ index 45584a278..b30e76ca0 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, bool allocate_completion, gfp_t mem_flags); struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, -@@ -2078,8 +2089,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); +@@ -2087,8 +2094,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); @@ -1594677,7 +1594370,7 @@ index 45584a278..b30e76ca0 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); -@@ -2127,13 +2143,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, +@@ -2136,13 +2148,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, enum xhci_ep_reset_type reset_type); int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, u32 slot_id); @@ -1594691,7 +1594384,7 @@ index 45584a278..b30e76ca0 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id, struct xhci_td *td); -@@ -2194,6 +2203,53 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, +@@ -2203,6 +2208,53 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, urb->stream_id); }