diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e7e93b8b9679ef786682472c88edc7cdfefe1c14..4f41f9ba2f280a38e0609eb992b710775d87c748 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -252,6 +252,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trace_xhci_inc_enq(ring);
 }
 
+static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
+			    struct xhci_segment *end_seg, union xhci_trb *end,
+			    unsigned int num_segs)
+{
+	union xhci_trb *last_on_seg;
+	int num = 0;
+	int i = 0;
+
+	do {
+		if (start_seg == end_seg && end >= start)
+			return num + (end - start);
+		last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
+		num += last_on_seg - start;
+		start_seg = start_seg->next;
+		start = start_seg->trbs;
+	} while (i++ <= num_segs);
+
+	return -EINVAL;
+}
+
 /*
  * Check to see if there's room to enqueue num_trbs on the ring and make sure
  * enqueue pointer will not advance into dequeue segment. See rules above.
@@ -2066,16 +2086,14 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
 	return 0;
 }
 
-static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		     struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
+static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		     struct xhci_ring *ep_ring, struct xhci_td *td,
+		     u32 trb_comp_code)
 {
 	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_ring *ep_ring;
-	u32 trb_comp_code;
+	int trbs_freed;
 
-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
-	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 
 	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
 	    trb_comp_code == COMP_STOPPED ||
@@ -2106,8 +2124,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 				EP_HARD_RESET);
 	} else {
 		/* Update ring dequeue pointer */
-		while (ep_ring->dequeue != td->last_trb)
-			inc_deq(xhci, ep_ring);
+		trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
+					      td->last_trb_seg, td->last_trb,
+					      ep_ring->num_segs);
+		if (trbs_freed < 0)
+			xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
+		else
+			ep_ring->num_trbs_free += trbs_freed;
+		ep_ring->dequeue = td->last_trb;
+		ep_ring->deq_seg = td->last_trb_seg;
 		inc_deq(xhci, ep_ring);
 	}
 
@@ -2132,9 +2157,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /*
  * Process control tds, update urb status and actual_length.
  */
-static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		union xhci_trb *ep_trb, struct xhci_transfer_event *event,
-		struct xhci_virt_ep *ep)
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		struct xhci_ring *ep_ring, struct xhci_td *td,
+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
@@ -2222,15 +2247,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		td->urb->actual_length = requested;
 
 finish_td:
-	return finish_td(xhci, td, event, ep);
+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 /*
  * Process isochronous tds, update urb packet status and actual_length.
  */
-static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		union xhci_trb *ep_trb, struct xhci_transfer_event *event,
-		struct xhci_virt_ep *ep)
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		struct xhci_ring *ep_ring, struct xhci_td *td,
+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
 	struct urb_priv *urb_priv;
 	int idx;
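The counting helper walks the ring segment by segment: once start and end share a segment it returns the plain pointer difference, otherwise it adds the distance from start to the segment's final slot (the link TRB, which the TRBS_PER_SEGMENT - 1 arithmetic leaves out of the count) and hops to the next segment, giving up with -EINVAL after num_segs hops so a corrupted ring cannot loop forever. Below is a minimal standalone model of that walk; the names mirror the patch, but the two-segment ring, the reduced trb/segment layouts, and the indexes are invented for the demonstration.

#include <stdio.h>

#define TRBS_PER_SEGMENT 256

struct trb { unsigned int field[4]; };

struct segment {
	struct trb trbs[TRBS_PER_SEGMENT];
	struct segment *next;
};

/* Same walk as xhci_num_trbs_to() in the patch, on the reduced types */
static int num_trbs_to(struct segment *start_seg, struct trb *start,
		       struct segment *end_seg, struct trb *end,
		       unsigned int num_segs)
{
	struct trb *last_on_seg;
	int num = 0;
	unsigned int i = 0;

	do {
		if (start_seg == end_seg && end >= start)
			return num + (int)(end - start);
		last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
		num += (int)(last_on_seg - start);
		start_seg = start_seg->next;
		start = start_seg->trbs;
	} while (i++ <= num_segs);

	return -1;	/* end TRB not found on the ring */
}

int main(void)
{
	static struct segment seg[2];

	seg[0].next = &seg[1];
	seg[1].next = &seg[0];	/* circular, like a transfer ring */

	/*
	 * Dequeue at seg0[250], TD's last TRB at seg1[4]: 5 TRBs up to
	 * the link slot of seg0 (250..254) plus 4 into seg1 prints 9;
	 * the link TRB in slot 255 is not part of the count.
	 */
	printf("%d\n", num_trbs_to(&seg[0], &seg[0].trbs[250],
				   &seg[1], &seg[1].trbs[4], 2));
	return 0;
}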
@@ -2253,6 +2278,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	/* handle completion code */
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
+		/* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
+		if (td->error_mid_td)
+			break;
 		if (remaining) {
 			frame->status = short_framestatus;
 			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
@@ -2268,9 +2296,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	case COMP_BANDWIDTH_OVERRUN_ERROR:
 		frame->status = -ECOMM;
 		break;
-	case COMP_ISOCH_BUFFER_OVERRUN:
 	case COMP_BABBLE_DETECTED_ERROR:
+		sum_trbs_for_length = true;
+		fallthrough;
+	case COMP_ISOCH_BUFFER_OVERRUN:
 		frame->status = -EOVERFLOW;
+		if (ep_trb != td->last_trb)
+			td->error_mid_td = true;
 		break;
 	case COMP_INCOMPATIBLE_DEVICE_ERROR:
 	case COMP_STALL_ERROR:
@@ -2278,8 +2310,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		break;
 	case COMP_USB_TRANSACTION_ERROR:
 		frame->status = -EPROTO;
+		sum_trbs_for_length = true;
 		if (ep_trb != td->last_trb)
-			return 0;
+			td->error_mid_td = true;
 		break;
 	case COMP_STOPPED:
 		sum_trbs_for_length = true;
@@ -2299,6 +2332,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		break;
 	}
 
+	if (td->urb_length_set)
+		goto finish_td;
+
 	if (sum_trbs_for_length)
 		frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
 			ep_trb_len - remaining;
@@ -2307,7 +2343,15 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 
 	td->urb->actual_length += frame->actual_length;
 
-	return finish_td(xhci, td, event, ep);
+finish_td:
+	/* Don't give back TD yet if we encountered an error mid TD */
+	if (td->error_mid_td && ep_trb != td->last_trb) {
+		xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
+		td->urb_length_set = true;
+		return 0;
+	}
+
+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
@@ -2328,8 +2372,9 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	frame->actual_length = 0;
 
 	/* Update ring dequeue pointer */
-	while (ep->ring->dequeue != td->last_trb)
-		inc_deq(xhci, ep->ring);
+	ep->ring->dequeue = td->last_trb;
+	ep->ring->deq_seg = td->last_trb_seg;
+	ep->ring->num_trbs_free += td->num_trbs - 1;
 	inc_deq(xhci, ep->ring);
 
 	return xhci_td_cleanup(xhci, td, ep->ring, status);
@@ -2338,17 +2383,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 /*
  * Process bulk and interrupt tds, update urb status and actual_length.
  */
-static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
-		union xhci_trb *ep_trb, struct xhci_transfer_event *event,
-		struct xhci_virt_ep *ep)
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+		struct xhci_ring *ep_ring, struct xhci_td *td,
+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
 	struct xhci_slot_ctx *slot_ctx;
-	struct xhci_ring *ep_ring;
 	u32 trb_comp_code;
 	u32 remaining, requested, ep_trb_len;
 
 	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
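process_isoc_td() now defers the giveback when an error lands on a TRB before the TD's last one: the frame status is latched, urb_length_set marks the length as already accounted, and the function returns 0 so the TD stays queued until the event for its final TRB (or for the next TD) arrives. A reduced model of that two-event sequence follows; the struct and the integer TRB indexes are stand-ins invented for the demonstration, not the kernel types.

#include <stdbool.h>
#include <stdio.h>

struct td_model {
	bool error_mid_td;	/* an earlier TRB of this TD failed */
	bool urb_length_set;	/* frame length already accounted */
	int last_trb;		/* index of the TD's final TRB */
};

/* Returns true when the TD may be completed (the finish_td() path) */
static bool isoc_event(struct td_model *td, int ep_trb, bool error)
{
	if (error && ep_trb != td->last_trb)
		td->error_mid_td = true;

	/* error mid TD: remember the state, wait for the final event */
	if (td->error_mid_td && ep_trb != td->last_trb) {
		td->urb_length_set = true;
		return false;
	}
	return true;
}

int main(void)
{
	struct td_model td = { .last_trb = 3 };

	/* transaction error on TRB 1 of a 4-TRB TD: TD is held back */
	printf("complete after mid TD error: %d\n", isoc_event(&td, 1, true));
	/* completion event for the final TRB: TD is given back now */
	printf("complete at last TRB: %d\n", isoc_event(&td, 3, false));
	return 0;
}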
@@ -2408,7 +2451,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			  remaining);
 		td->urb->actual_length = 0;
 	}
-	return finish_td(xhci, td, event, ep);
+
+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 /*
@@ -2693,17 +2737,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		}
 
 		if (!ep_seg) {
-			if (!ep->skip ||
-			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
-				/* Some host controllers give a spurious
-				 * successful event after a short transfer.
-				 * Ignore it.
-				 */
-				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
-				    ep_ring->last_td_was_short) {
-					ep_ring->last_td_was_short = false;
-					goto cleanup;
+
+			if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+				skip_isoc_td(xhci, td, ep, status);
+				goto cleanup;
+			}
+
+			/*
+			 * Some hosts give a spurious success event after a short
+			 * transfer. Ignore it.
+			 */
+			if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+			    ep_ring->last_td_was_short) {
+				ep_ring->last_td_was_short = false;
+				goto cleanup;
+			}
+
+			/*
+			 * xHCI 4.10.2 states that isoc endpoints should continue
+			 * processing the next TD if there was an error mid TD.
+			 * So hosts like NEC don't generate an event for the last
+			 * isoc TRB even if the IOC flag is set.
+			 * xHCI 4.9.1 states that if there are errors in multi-TRB
+			 * TDs the xHC should generate an error for that TRB, and
+			 * if it proceeds to the next TD it should generate an
+			 * event for any TRB with the IOC flag on the way. Other
+			 * hosts follow this. So this event might be for the next TD.
+			 */
+			if (td->error_mid_td &&
+			    !list_is_last(&td->td_list, &ep_ring->td_list)) {
+				struct xhci_td *td_next = list_next_entry(td, td_list);
+
+				ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
+						   td_next->last_trb, ep_trb_dma, false);
+				if (ep_seg) {
+					/* give back previous TD, start handling new */
+					xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
+					ep_ring->dequeue = td->last_trb;
+					ep_ring->deq_seg = td->last_trb_seg;
+					inc_deq(xhci, ep_ring);
+					xhci_td_cleanup(xhci, td, ep_ring, td->status);
+					td = td_next;
 				}
+			}
+
+			if (!ep_seg) {
 				/* HC is busted, give up! */
 				xhci_err(xhci,
 					"ERROR Transfer event TRB DMA ptr not "
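The lookahead above covers controllers that, per xHCI 4.10.2, never emit the completion event for the failed TD's final TRB: when error_mid_td is set and the event's DMA pointer does not fall inside the current TD, the next TD on the ring is probed before the controller is declared broken. A sketch of that matching rule; the index-based trb_in_td() stand-in and the two spans are assumptions for the demo, whereas the real helper compares DMA addresses segment by segment.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct td_span {
	int first_trb, last_trb;	/* inclusive TRB indexes */
	bool error_mid_td;
};

/* Stand-in for trb_in_td(): does the event's TRB lie in this TD? */
static bool trb_in_td(const struct td_span *td, int ep_trb)
{
	return ep_trb >= td->first_trb && ep_trb <= td->last_trb;
}

/* Returns the TD the event belongs to, or NULL ("HC is busted") */
static struct td_span *match_event(struct td_span *td,
				   struct td_span *td_next, int ep_trb)
{
	if (trb_in_td(td, ep_trb))
		return td;
	/* the event may be for the next TD after a mid TD error */
	if (td->error_mid_td && td_next && trb_in_td(td_next, ep_trb))
		return td_next;	/* caller gives back td first */
	return NULL;
}

int main(void)
{
	struct td_span td = { 0, 3, true }, td_next = { 4, 7, false };

	/* no event was seen for TRB 3; the next event points at TRB 5 */
	struct td_span *m = match_event(&td, &td_next, 5);

	printf("%s\n", m == &td_next ? "matched next TD" : "no match");
	return 0;
}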
@@ -2715,9 +2793,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					  ep_trb_dma, true);
 				return -ESHUTDOWN;
 			}
-
-			skip_isoc_td(xhci, td, ep, status);
-			goto cleanup;
 		}
 
 		if (trb_comp_code == COMP_SHORT_PACKET)
 			ep_ring->last_td_was_short = true;
@@ -2759,11 +2834,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 
 		/* update the urb's actual_length and give back to the core */
 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
-			process_ctrl_td(xhci, td, ep_trb, event, ep);
+			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
-			process_isoc_td(xhci, td, ep_trb, event, ep);
+			process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
 		else
-			process_bulk_intr_td(xhci, td, ep_trb, event, ep);
+			process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
 cleanup:
 		handling_skipped_tds = ep->skip &&
 			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
@@ -3494,7 +3569,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field |= TRB_IOC;
 			more_trbs_coming = false;
 			td->last_trb = ring->enqueue;
-
+			td->last_trb_seg = ring->enq_seg;
 			if (xhci_urb_suitable_for_idt(urb)) {
 				memcpy(&send_addr, urb->transfer_buffer,
 				       trb_buff_len);
@@ -3520,7 +3595,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				upper_32_bits(send_addr),
 				length_field,
 				field);
-
+		td->num_trbs++;
 		addr += trb_buff_len;
 		sent_len = trb_buff_len;
 
@@ -3544,8 +3619,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				       ep_index, urb->stream_id,
 				       1, urb, 1, mem_flags);
 		urb_priv->td[1].last_trb = ring->enqueue;
+		urb_priv->td[1].last_trb_seg = ring->enq_seg;
 		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
 		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
+		urb_priv->td[1].num_trbs++;
 	}
 
 	check_trb_math(urb, enqd_len);
@@ -3596,6 +3673,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	urb_priv = urb->hcpriv;
 	td = &urb_priv->td[0];
+	td->num_trbs = num_trbs;
 
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
@@ -3668,6 +3746,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	/* Save the DMA address of the last TRB in the TD */
 	td->last_trb = ep_ring->enqueue;
+	td->last_trb_seg = ep_ring->enq_seg;
 
 	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
 	/* If the device sent data, the status stage is an OUT transfer */
@@ -3912,7 +3991,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			goto cleanup;
 		}
 		td = &urb_priv->td[i];
-
+		td->num_trbs = trbs_per_td;
 		/* use SIA as default, if frame id is used overwrite it */
 		sia_frame_id = TRB_SIA;
 		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
@@ -3955,6 +4034,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		} else {
 			more_trbs_coming = false;
 			td->last_trb = ep_ring->enqueue;
+			td->last_trb_seg = ep_ring->enq_seg;
 			field |= TRB_IOC;
 			if (trb_block_event_intr(xhci, num_tds, i))
 				field |= TRB_BEI;
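Each queueing path now records the TD's TRB count and last segment at enqueue time: the control path sets td->num_trbs up front (setup + data + status), the bulk path increments it once per queued TRB, and the isoc path sets it to trbs_per_td. That count is what lets skip_isoc_td() credit num_trbs_free arithmetically instead of stepping the dequeue pointer one TRB at a time. A worked count under an assumed two-TRB data stage:

#include <stdio.h>

int main(void)
{
	unsigned int data_trbs = 2;			/* assumed data stage */
	unsigned int num_trbs = 1 + data_trbs + 1;	/* setup + data + status */
	unsigned int num_trbs_free = 0;

	/*
	 * skip_isoc_td()-style advance: jump the dequeue pointer straight
	 * to last_trb, credit all TRBs but one, and let the trailing
	 * inc_deq() account for the final TRB.
	 */
	num_trbs_free += num_trbs - 1;	/* ring->num_trbs_free += td->num_trbs - 1 */
	num_trbs_free += 1;		/* inc_deq(xhci, ep->ring) */

	printf("td->num_trbs=%u freed=%u\n", num_trbs, num_trbs_free);
	return 0;
}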
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 03478edb117909a8efc26628fd6668513157e251..285db9a2288dfdb9e8f0b0f0ccf367d508ea1f1c 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1550,9 +1550,12 @@ struct xhci_td {
 	struct xhci_segment	*start_seg;
 	union xhci_trb		*first_trb;
 	union xhci_trb		*last_trb;
+	struct xhci_segment	*last_trb_seg;
 	struct xhci_segment	*bounce_seg;
 	/* actual_length of the URB has already been set */
 	bool			urb_length_set;
+	bool			error_mid_td;
+	unsigned int		num_trbs;
 };
 
 /* xHCI command default timeout value */
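For reference, the three xhci_td additions with the role each plays in the patch, as a commented excerpt rather than a complete definition (the excerpt struct name is hypothetical and struct xhci_segment is deliberately left incomplete here):

#include <stdbool.h>

struct xhci_segment;	/* referenced by pointer only */

struct xhci_td_excerpt {
	struct xhci_segment	*last_trb_seg;	/* segment holding last_trb, so the
						 * dequeue pointer can jump there
						 * without walking every TRB */
	bool			error_mid_td;	/* a TRB before last_trb failed;
						 * hold the URB until the final
						 * completion event arrives */
	unsigned int		num_trbs;	/* TRBs queued for this TD, used to
						 * credit the ring's num_trbs_free */
};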