struct wusbhc;
struct wahc;
extern void wa_urb_enqueue_run(struct work_struct *ws);
+extern void wa_process_errored_transfers_run(struct work_struct *ws);
/**
* RPipe instance
struct list_head xfer_list;
struct list_head xfer_delayed_list;
+ struct list_head xfer_errored_list;
+ /*
+ * Lock for the above xfer lists. It can be taken while an xfer->lock is
+ * held, but never in the reverse order.
+ */
spinlock_t xfer_list_lock;
- struct work_struct xfer_work;
+ struct work_struct xfer_enqueue_work;
+ struct work_struct xfer_error_work;
atomic_t xfer_id_count;
};
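
A minimal sketch of the nesting that the lock-ordering comment above permits (illustrative only, with hypothetical surrounding context): an xfer's lock may be held when the list lock is taken, never the reverse.

	/* Allowed: xfer->lock (outer) nests around xfer_list_lock (inner). */
	spin_lock_irqsave(&xfer->lock, flags);
	spin_lock(&wa->xfer_list_lock);
	list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
	spin_unlock(&wa->xfer_list_lock);
	spin_unlock_irqrestore(&xfer->lock, flags);

	/*
	 * Forbidden: taking an xfer->lock while xfer_list_lock is held
	 * inverts the order and risks an AB-BA deadlock.
	 */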
edc_init(&wa->dti_edc);
INIT_LIST_HEAD(&wa->xfer_list);
INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ INIT_LIST_HEAD(&wa->xfer_errored_list);
spin_lock_init(&wa->xfer_list_lock);
- INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
atomic_set(&wa->xfer_id_count, 1);
}
}
extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern void rpipe_clear_feature_stalled(struct wahc *,
+ struct usb_host_endpoint *);
extern int wa_rpipes_create(struct wahc *);
extern void wa_rpipes_destroy(struct wahc *);
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+
+/* Clear the stalled status of an RPIPE. */
+void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+ struct wa_rpipe *rpipe;
+
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ RPIPE_STALL, index, NULL, 0, 1000);
+ }
+ mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
+
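For reference, the CLEAR_FEATURE(RPIPE_STALL) transfer above is a class-specific control request addressed to the RPipe. Expressed as a raw setup packet it would look roughly like this (a sketch assembled from the values in the code above, not additional patch content):

	struct usb_ctrlrequest req = {
		.bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
		.bRequest     = USB_REQ_CLEAR_FEATURE,
		.wValue       = cpu_to_le16(RPIPE_STALL),
		.wIndex       = rpipe->descr.wRPipeIndex, /* already __le16 */
		.wLength      = 0,                        /* no data stage */
	};

wLength is zero, so only the setup packet travels to the adapter; there is no data stage to direct.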
*/
void wa_urb_enqueue_run(struct work_struct *ws)
{
- struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+ struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
struct wa_xfer *xfer, *next;
struct urb *urb;
LIST_HEAD(tmp_list);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
+/*
+ * Process the errored transfers on the Wire Adapter outside of
+ * interrupt context.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+ struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+ struct wa_xfer *xfer, *next;
+ LIST_HEAD(tmp_list);
+
+ pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+ /* Move wa->xfer_errored_list onto a temp list while holding the lock. */
+ spin_lock_irq(&wa->xfer_list_lock);
+ list_cut_position(&tmp_list, &wa->xfer_errored_list,
+ wa->xfer_errored_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * Run rpipe_clear_feature_stalled on each entry of the temp list
+ * without the list lock held.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ ep = xfer->ep;
+ rpipe = ep->hcpriv;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* Clear the RPIPE stall; no spinlocks are held here, so this may sleep. */
+ rpipe_clear_feature_stalled(wa, ep);
+
+ /* complete the xfer. This removes it from the tmp list. */
+ wa_xfer_completion(xfer);
+
+ /* check for work. */
+ wa_xfer_delayed_run(rpipe);
+ }
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
+
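A note on the draining idiom in the work function above: list_cut_position() moves entries rather than copying them. After the call the source list is empty and the temporary list owns every node, which is why the entries can then be walked without the spinlock held. A standalone illustration with hypothetical names:

	LIST_HEAD(src);
	LIST_HEAD(tmp);

	/* ... nodes are added to src under a lock elsewhere ... */

	/* Cut from the first node through src.prev (the last node). */
	list_cut_position(&tmp, &src, src.prev);

	/*
	 * list_empty(&src) is now true and tmp holds every node; the
	 * call is a no-op if src was already empty.
	 */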
/*
* Submit a transfer to the Wire Adapter in a delayed way
*
spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
- queue_work(wusbd, &wa->xfer_work);
+ queue_work(wusbd, &wa->xfer_enqueue_work);
} else {
wa_urb_enqueue_b(xfer);
}
xfer = urb->hcpriv;
if (xfer == NULL) {
- /* NOthing setup yet enqueue will see urb->status !=
+ /*
+ * Nothing setup yet; enqueue will see urb->status !=
* -EINPROGRESS (by hcd layer) and bail out with
* error, no need to do completion
*/
*
* inbound transfers: need to schedule a DTI read
*
- * FIXME: this functio needs to be broken up in parts
+ * FIXME: this function needs to be broken up in parts
*/
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
seg->result = result;
kfree(wa->buf_in_urb->sg);
error_sg_alloc:
+ __wa_xfer_abort(xfer);
error_complete:
seg->status = WA_SEG_ERROR;
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
done = __wa_xfer_is_done(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
+ /*
+ * Queue a work item to clear the STALL for control endpoints.
+ * Otherwise, let endpoint_reset take care of it.
+ */
+ if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+ usb_endpoint_xfer_control(&xfer->ep->desc) &&
+ done) {
+ dev_info(dev, "Control EP stall. Queue delayed work.\n");
+ /*
+ * IRQs are already disabled here by the earlier irqsave on
+ * xfer->lock, so use the plain lock variants; spin_unlock_irq
+ * would re-enable IRQs while xfer->lock is still held.
+ */
+ spin_lock(&wa->xfer_list_lock);
+ /* remove xfer from xfer_list. */
+ list_del(&xfer->list_node);
+ /* add xfer to xfer_errored_list. */
+ list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
+ spin_unlock(&wa->xfer_list_lock);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ queue_work(wusbd, &wa->xfer_error_work);
+ } else {
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+
return;
error_bad_seg:
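
The detour through xfer_error_work above exists because clearing the stall takes wa->rpipe_mutex and issues a blocking usb_control_msg(), neither of which is allowed in the atomic context where transfer results are processed; the work item runs in process context instead. The (usb_status & 0x3f) mask keeps only the status-code bits of the reported result before the comparison; a hypothetical helper naming that check:

	/* Hypothetical helper: did the adapter report this segment halted? */
	static inline bool wa_seg_halted(u8 usb_status)
	{
		return (usb_status & 0x3f) == WA_XFER_STATUS_HALTED;
	}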