USB: WUSBCORE: clear RPIPE stall for control endpoints
author    Thomas Pugliese <thomas.pugliese@gmail.com>
          Thu, 15 Aug 2013 17:21:30 +0000 (12:21 -0500)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 16 Aug 2013 00:35:31 +0000 (17:35 -0700)
When the HWA encounters a STALL on a control endpoint, it should clear the
RPIPE_STALL feature on the RPIPE before processing the next transfer
request.  Otherwise, all transfer requests on that endpoint after the
first STALL will fail because the RPIPE is still in the halted state.
This also removes the unnecessary call to spin_lock_irqsave for a nested
lock that was present in the first patch.
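
In outline, the recovery path this patch adds is: the DTI completion
handler (atomic context) detects a HALTED status on a control endpoint,
parks the transfer on a new errored list, and queues a work item; the
work item then clears the stall and resumes the pipe from process
context.  A condensed sketch using the names from the diff below (not a
literal excerpt):

	/* completion path, xfer->lock held: defer stall handling */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
	    usb_endpoint_xfer_control(&xfer->ep->desc)) {
		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
		queue_work(wusbd, &wa->xfer_error_work);
	}

	/* work item, process context: clear the halt, then resume */
	rpipe_clear_feature_stalled(wa, ep); /* CLEAR_FEATURE(RPIPE_STALL) */
	wa_xfer_completion(xfer);
	wa_xfer_delayed_run(rpipe);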

Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/usb/wusbcore/wa-hc.h
drivers/usb/wusbcore/wa-rpipe.c
drivers/usb/wusbcore/wa-xfer.c
include/linux/usb/wusb-wa.h

index d6bea3e0b54a3c3493bb4b6106aad0d80c294770..cf250c21e946bcab938baafc6ca3f48ca2f1a2d3 100644 (file)
@@ -91,6 +91,7 @@
 struct wusbhc;
 struct wahc;
 extern void wa_urb_enqueue_run(struct work_struct *ws);
+extern void wa_process_errored_transfers_run(struct work_struct *ws);
 
 /**
  * RPipe instance
@@ -190,8 +191,14 @@ struct wahc {
 
        struct list_head xfer_list;
        struct list_head xfer_delayed_list;
+       struct list_head xfer_errored_list;
+       /*
+        * lock for the above xfer lists.  Can be taken while a xfer->lock is
+        * held but not in the reverse order.
+        */
        spinlock_t xfer_list_lock;
-       struct work_struct xfer_work;
+       struct work_struct xfer_enqueue_work;
+       struct work_struct xfer_error_work;
        atomic_t xfer_id_count;
 };
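
The ordering rule in the comment above matches how the locks are used
later in wa-xfer.c: xfer->lock may already be held when xfer_list_lock
is taken, never the reverse.  A minimal sketch of the permitted nesting
(field names from this patch; a plain spin_lock is used for the inner
lock here since the outer irqsave has already disabled interrupts):

	spin_lock_irqsave(&xfer->lock, flags);	/* outer */
	spin_lock(&wa->xfer_list_lock);		/* inner: allowed order */
	list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
	spin_unlock(&wa->xfer_list_lock);
	spin_unlock_irqrestore(&xfer->lock, flags);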
 
@@ -244,8 +251,10 @@ static inline void wa_init(struct wahc *wa)
        edc_init(&wa->dti_edc);
        INIT_LIST_HEAD(&wa->xfer_list);
        INIT_LIST_HEAD(&wa->xfer_delayed_list);
+       INIT_LIST_HEAD(&wa->xfer_errored_list);
        spin_lock_init(&wa->xfer_list_lock);
-       INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+       INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+       INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
        atomic_set(&wa->xfer_id_count, 1);
 }
 
@@ -269,6 +278,8 @@ static inline void rpipe_put(struct wa_rpipe *rpipe)
 
 }
 extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern void rpipe_clear_feature_stalled(struct wahc *,
+                       struct usb_host_endpoint *);
 extern int wa_rpipes_create(struct wahc *);
 extern void wa_rpipes_destroy(struct wahc *);
 static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
index 9a595c1ed867ddbe29fb1d4aa9995b025262eff7..fd4f1ce6256ac7a60fd2a9d5745e88b6f3c98d63 100644 (file)
@@ -527,3 +527,24 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
        mutex_unlock(&wa->rpipe_mutex);
 }
 EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+
+/* Clear the stalled status of an RPIPE. */
+void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+       struct wa_rpipe *rpipe;
+
+       mutex_lock(&wa->rpipe_mutex);
+       rpipe = ep->hcpriv;
+       if (rpipe != NULL) {
+               u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+               usb_control_msg(
+                       wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+                       USB_REQ_CLEAR_FEATURE,
+                       USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+                       RPIPE_STALL, index, NULL, 0, 1000);
+       }
+       mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
+
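
The control message above is the Wire Adapter class CLEAR_FEATURE
request addressed to an RPipe.  Annotated for reference (same values as
the code above; the comments are editorial, not from the source):

	usb_control_msg(wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
		USB_REQ_CLEAR_FEATURE,			/* bRequest */
		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, /* bmRequestType */
		RPIPE_STALL,		/* wValue: feature selector */
		index,			/* wIndex: RPipe index */
		NULL, 0, 1000);		/* no data stage, 1 s timeout */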
index f5c81afc6e96a48334bd3bc74bab3972cfb27585..d74fe1ae16acb5cd024712fe192e832aa96f1cca 100644 (file)
@@ -1100,7 +1100,7 @@ error_xfer_submit:
  */
 void wa_urb_enqueue_run(struct work_struct *ws)
 {
-       struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+       struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
        struct wa_xfer *xfer, *next;
        struct urb *urb;
        LIST_HEAD(tmp_list);
@@ -1125,6 +1125,49 @@ void wa_urb_enqueue_run(struct work_struct *ws)
 }
 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
 
+/*
+ * Process the errored transfers on the Wire Adapter outside of interrupt.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+       struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+       struct wa_xfer *xfer, *next;
+       LIST_HEAD(tmp_list);
+
+       pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+       /* Create a copy of the wa->xfer_errored_list while holding the lock */
+       spin_lock_irq(&wa->xfer_list_lock);
+       list_cut_position(&tmp_list, &wa->xfer_errored_list,
+                       wa->xfer_errored_list.prev);
+       spin_unlock_irq(&wa->xfer_list_lock);
+
+       /*
+        * run rpipe_clear_feature_stalled from temp list without list lock
+        * held.
+        */
+       list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+               struct usb_host_endpoint *ep;
+               unsigned long flags;
+               struct wa_rpipe *rpipe;
+
+               spin_lock_irqsave(&xfer->lock, flags);
+               ep = xfer->ep;
+               rpipe = ep->hcpriv;
+               spin_unlock_irqrestore(&xfer->lock, flags);
+
+               /* clear RPIPE feature stalled without holding a lock. */
+               rpipe_clear_feature_stalled(wa, ep);
+
+               /* complete the xfer. This removes it from the tmp list. */
+               wa_xfer_completion(xfer);
+
+               /* check for work. */
+               wa_xfer_delayed_run(rpipe);
+       }
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
+
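One detail in wa_process_errored_transfers_run() above is worth
spelling out: passing wa->xfer_errored_list.prev (the last element) to
list_cut_position() splices the entire list onto the private tmp_list,
so the shared lock is held only for the splice and the entries are
processed unlocked.  A self-contained sketch of the same pattern
(hypothetical helper, not from the patch):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Move all entries of @shared onto @priv under @lock, so the
	 * caller can walk them without holding the lock. */
	static void drain_to(struct list_head *priv,
			     struct list_head *shared, spinlock_t *lock)
	{
		spin_lock_irq(lock);
		list_cut_position(priv, shared, shared->prev);
		spin_unlock_irq(lock);
	}
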
 /*
  * Submit a transfer to the Wire Adapter in a delayed way
  *
@@ -1180,7 +1223,7 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
-               queue_work(wusbd, &wa->xfer_work);
+               queue_work(wusbd, &wa->xfer_enqueue_work);
        } else {
                wa_urb_enqueue_b(xfer);
        }
@@ -1222,7 +1265,8 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
 
        xfer = urb->hcpriv;
        if (xfer == NULL) {
-               /* NOthing setup yet enqueue will see urb->status !=
+               /*
+                * Nothing setup yet enqueue will see urb->status !=
                 * -EINPROGRESS (by hcd layer) and bail out with
                 * error, no need to do completion
                 */
@@ -1360,7 +1404,7 @@ static int wa_xfer_status_to_errno(u8 status)
  *
  * inbound transfers: need to schedule a DTI read
  *
- * FIXME: this functio needs to be broken up in parts
+ * FIXME: this function needs to be broken up in parts
  */
 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
 {
@@ -1482,17 +1526,37 @@ error_submit_buf_in:
        seg->result = result;
        kfree(wa->buf_in_urb->sg);
 error_sg_alloc:
+       __wa_xfer_abort(xfer);
 error_complete:
        seg->status = WA_SEG_ERROR;
        xfer->segs_done++;
        rpipe_ready = rpipe_avail_inc(rpipe);
-       __wa_xfer_abort(xfer);
        done = __wa_xfer_is_done(xfer);
-       spin_unlock_irqrestore(&xfer->lock, flags);
-       if (done)
-               wa_xfer_completion(xfer);
-       if (rpipe_ready)
-               wa_xfer_delayed_run(rpipe);
+       /*
+        * queue work item to clear STALL for control endpoints.
+        * Otherwise, let endpoint_reset take care of it.
+        */
+       if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+               usb_endpoint_xfer_control(&xfer->ep->desc) &&
+               done) {
+
+               dev_info(dev, "Control EP stall.  Queue delayed work.\n");
+               spin_lock_irq(&wa->xfer_list_lock);
+               /* remove xfer from xfer_list. */
+               list_del(&xfer->list_node);
+               /* add xfer to xfer_errored_list. */
+               list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
+               spin_unlock_irq(&wa->xfer_list_lock);
+               spin_unlock_irqrestore(&xfer->lock, flags);
+               queue_work(wusbd, &wa->xfer_error_work);
+       } else {
+               spin_unlock_irqrestore(&xfer->lock, flags);
+               if (done)
+                       wa_xfer_completion(xfer);
+               if (rpipe_ready)
+                       wa_xfer_delayed_run(rpipe);
+       }
+
        return;
 
 error_bad_seg:
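
Two things to note in the hunk above.  First, the patch hoists
__wa_xfer_abort() above the error_complete label, so it now runs only
on the buf-in setup error paths that fall through from error_sg_alloc;
a transfer that reaches error_complete directly (e.g. with a HALTED
status) is no longer aborted before the stall is cleared.  Second, the
status test masks with 0x3f because, as used throughout this function,
the low six bits of bTransferStatus carry the status code while the
high bits are flag bits.  A minimal helper expressing that check
(hypothetical name, same logic as the condition above):

	/* bits 5:0 of bTransferStatus hold the status code (per this
	 * file's existing usage); compare only those bits. */
	static bool xfer_status_halted(u8 usb_status)
	{
		return (usb_status & 0x3f) == WA_XFER_STATUS_HALTED;
	}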
index 6be985b2a4342fc7ebabc1df2245cb3460aa71cb..4ff744e2b678a27f051649b86f1c8ee4eb8210d1 100644 (file)
@@ -66,6 +66,7 @@ enum {
        WA_ENABLE = 0x01,
        WA_RESET = 0x02,
        RPIPE_PAUSE = 0x1,
+       RPIPE_STALL = 0x2,
 };
 
 /* Responses from Get Status request ([WUSB] section 8.3.1.6) */