struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
- struct se_device *dev);
+ struct se_device *dev, int err, bool write_pending);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
transport_write_pending_qf(cmd);
- else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+ else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+ cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
transport_complete_qf(cmd);
}
}
}
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
goto check_stop;
default:
}
ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
check_stop:
return;
queue_full:
- cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
EXPORT_SYMBOL(transport_generic_request_failure);
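
The classification rule all of these call sites now rely on lives in
transport_handle_queue_full() below; condensed as a sketch (this is a
restatement of the patch's own logic, with a hypothetical variable `next`,
not a separate helper the patch adds):

	/* Sketch: how transport_handle_queue_full() picks the next t_state. */
	if (err == -EAGAIN || err == -ENOMEM)
		next = write_pending ? TRANSPORT_COMPLETE_QF_WP	/* retry */
				     : TRANSPORT_COMPLETE_QF_OK;
	else
		next = TRANSPORT_COMPLETE_QF_ERR; /* fatal: CHECK_CONDITION */
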
int ret = 0;
transport_complete_task_attr(cmd);
+ /*
+ * If a fabric driver ->write_pending() or ->queue_data_in() callback
+ * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+ * the same callbacks should not be retried. Return CHECK_CONDITION
+ * if a scsi_status is not already set.
+ *
+ * If a fabric driver ->queue_status() has returned non-zero, always
+ * keep retrying, no matter what.
+ */
+ if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+ if (cmd->scsi_status)
+ goto queue_status;
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- goto out;
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+ goto queue_status;
}
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ goto queue_status;
+
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
if (cmd->scsi_status)
break;
}
-out:
if (ret < 0) {
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
return;
}
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
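
To make the retry contract above concrete, here is a hypothetical fabric
->queue_data_in() callback (illustration only; my_fabric_ring_full() and
my_fabric_xmit() are made-up helpers, not target core API). -EAGAIN and
-ENOMEM ask the core to retry the same callback from new process context
via the qf_cmd_list worker; any other non-zero value now lands in
TRANSPORT_COMPLETE_QF_ERR and is answered with CHECK_CONDITION instead of
being retried:

	static int my_fabric_queue_data_in(struct se_cmd *cmd)
	{
		/* Transient resource shortage: safe to retry later. */
		if (my_fabric_ring_full(cmd->se_sess))
			return -EAGAIN;

		/* Transport failure: treated as fatal by the core, which
		 * answers with CHECK_CONDITION via ->queue_status(). */
		if (my_fabric_xmit(cmd))
			return -EIO;

		return 0;
	}
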
-static void transport_handle_queue_full(
- struct se_cmd *cmd,
- struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+ int err, bool write_pending)
{
+ /*
+ * -EAGAIN or -ENOMEM signals a retry of the ->write_pending() and/or
+ * ->queue_data_in() callbacks from new process context.
+ *
+ * For any other error, transport_complete_qf() will send
+ * CHECK_CONDITION via ->queue_status() instead of retrying the
+ * associated fabric driver data-transfer callbacks.
+ */
+ if (err == -EAGAIN || err == -ENOMEM) {
+ cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+ TRANSPORT_COMPLETE_QF_OK;
+ } else {
+ pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+ }
+
spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc_mb(&dev->dev_qf_count);
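
The write_pending=true leg follows the same rule; a minimal sketch of a
hypothetical fabric ->write_pending() (my_fabric_r2t_ring_full() and
my_fabric_send_r2t() are assumed helpers, not target core API). -ENOMEM
parks the command in TRANSPORT_COMPLETE_QF_WP so transport_write_pending_qf()
re-invokes ->write_pending() later, while any other error escalates to
TRANSPORT_COMPLETE_QF_ERR:

	static int my_fabric_write_pending(struct se_cmd *cmd)
	{
		/* Out of R2T resources: the core parks the command as
		 * TRANSPORT_COMPLETE_QF_WP and retries this callback. */
		if (my_fabric_r2t_ring_full(cmd->se_sess))
			return -ENOMEM;

		/* Anything else is fatal: the core moves to
		 * TRANSPORT_COMPLETE_QF_ERR and sends CHECK_CONDITION. */
		if (my_fabric_send_r2t(cmd))
			return -EIO;

		return 0;
	}
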
WARN_ON(!cmd->scsi_status);
ret = transport_send_check_condition_and_sense(
cmd, 0, 1);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
} else if (rc) {
ret = transport_send_check_condition_and_sense(cmd,
rc, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
if (target_read_prot_action(cmd)) {
ret = transport_send_check_condition_and_sense(cmd,
cmd->pi_err, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
case DMA_TO_DEVICE:
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.tx_data_octets);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
}
queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
default:
queue_full:
pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
" data_direction: %d\n", cmd, cmd->data_direction);
- cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
- transport_handle_queue_full(cmd, cmd->se_dev);
+
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
void target_free_sgl(struct scatterlist *sgl, int nents)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
- /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
- WARN_ON(ret);
-
- return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return 0;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
- cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
int ret;
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM) {
+ if (ret) {
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
cmd);
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
}
}
__releases(&cmd->t_state_lock)
__acquires(&cmd->t_state_lock)
{
+ int ret;
+
assert_spin_locked(&cmd->t_state_lock);
WARN_ON_ONCE(!irqs_disabled());
trace_target_cmd_complete(cmd);
spin_unlock_irq(&cmd->t_state_lock);
- cmd->se_tfo->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret)
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
spin_lock_irq(&cmd->t_state_lock);
return 1;
void transport_send_task_abort(struct se_cmd *cmd)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
cmd->t_task_cdb[0], cmd->tag);
trace_target_cmd_complete(cmd);
- cmd->se_tfo->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret)
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
static void target_tmr_work(struct work_struct *work)