#define HSQ_NUM_SLOTS 64
#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
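+/*
+ * Resubmit the request from the workqueue, i.e. in process context,
+ * after request_atomic() returned -EBUSY in mmc_hsq_pump_requests().
+ */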
+static void mmc_hsq_retry_handler(struct work_struct *work)
+{
+ struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
+ struct mmc_host *mmc = hsq->mmc;
+
+ mmc->ops->request(mmc, hsq->mrq);
+}
+
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
struct mmc_host *mmc = hsq->mmc;
struct hsq_slot *slot;
unsigned long flags;
+ int ret = 0;

spin_lock_irqsave(&hsq->lock, flags);
spin_unlock_irqrestore(&hsq->lock, flags);
- mmc->ops->request(mmc, hsq->mrq);
+ if (mmc->ops->request_atomic)
+ ret = mmc->ops->request_atomic(mmc, hsq->mrq);
+ else
+ mmc->ops->request(mmc, hsq->mrq);
+
+ /*
+ * If request_atomic() returns -EBUSY, the card may be busy right now,
+ * so hand the request off to the retry work and try again from the
+ * non-atomic context, instead of doing time-consuming operations in
+ * atomic context for this unusual case.
+ *
+ * Note: other errors only trigger a warning, since the host driver
+ * will handle them.
+ */
+ if (ret == -EBUSY)
+ schedule_work(&hsq->retry_work);
+ else
+ WARN_ON_ONCE(ret);
}
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
hsq->mmc->cqe_private = hsq;
mmc->cqe_ops = &mmc_hsq_ops;
+ INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
spin_lock_init(&hsq->lock);
init_waitqueue_head(&hsq->wait_queue);
void (*post_req)(struct mmc_host *host, struct mmc_request *req,
		int err);
void (*pre_req)(struct mmc_host *host, struct mmc_request *req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
+ /*
+ * Submit one request to the host in atomic context; must not sleep.
+ * Returns -EBUSY when the card is busy, so the caller can retry
+ * from non-atomic context.
+ */
+ int (*request_atomic)(struct mmc_host *host,
+ struct mmc_request *req);
/*
* Avoid calling the next three functions too often or in a "fast
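
For illustration, here is a minimal sketch of how a host driver might implement and register request_atomic(). The "foo" driver and its helpers foo_card_busy() and foo_start_transfer() are hypothetical names, not part of this patch; the point is the contract that mmc_hsq_pump_requests() relies on: do not sleep, and return -EBUSY instead of waiting when the card is busy, so the retry is deferred to the workqueue.

static int foo_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct foo_host *host = mmc_priv(mmc);	/* hypothetical driver data */

	/*
	 * The card is still signalling busy: bail out immediately with
	 * -EBUSY so that mmc_hsq_pump_requests() schedules the retry
	 * work, rather than spinning here in atomic context.
	 */
	if (foo_card_busy(host))
		return -EBUSY;

	/* Start the transfer; completion is reported from the IRQ handler */
	foo_start_transfer(host, mrq);

	return 0;
}

static const struct mmc_host_ops foo_ops = {
	/* ... other callbacks such as .request and .set_ios ... */
	.request_atomic	= foo_request_atomic,
};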