git.baikalelectronics.ru Git - kernel.git/commitdiff
Merge branch 'nvme-4.12' of git://git.infradead.org/nvme into for-4.12/post-merge
author Jens Axboe <axboe@fb.com>
Thu, 27 Apr 2017 17:33:01 +0000 (11:33 -0600)
committer Jens Axboe <axboe@fb.com>
Thu, 27 Apr 2017 17:33:01 +0000 (11:33 -0600)
Christoph writes:

"A couple more updates for 4.12.  The biggest pile is fc and lpfc
 updates from James, but there are various small fixes and cleanups as
 well."

Fixes up a few merge issues, and also a warning in
lpfc_nvmet_rcv_unsol_abort() if CONFIG_NVME_TARGET_FC isn't enabled.

Signed-off-by: Jens Axboe <axboe@fb.com>
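
For context on the CONFIG_NVME_TARGET_FC warning mentioned above: in the diff below, the unconditional "return 0;" in lpfc_nvmet_rcv_unsol_abort() is moved from inside the #if block to after the #endif, which reads like a fix for a "control reaches end of non-void function" (-Wreturn-type) warning in builds where the option is off. A minimal sketch of that pattern follows; the function names and the IS_ENABLED stand-in are hypothetical simplifications, not the real lpfc code or the kernel's actual IS_ENABLED macro.

/* warn_sketch.c - hypothetical reduction of the !CONFIG_NVME_TARGET_FC case.
 * Names below are illustrative only; IS_ENABLED here is a stand-in that
 * simply pretends the option is disabled.
 */

#define IS_ENABLED(option) 0	/* stand-in: behave as CONFIG_NVME_TARGET_FC=n */

/* Before the fixup: with the option off, the whole body (including its
 * "return 0;") is preprocessed away, so this non-void function falls off
 * the end and gcc warns with -Wreturn-type.
 */
int rcv_unsol_abort_before(void)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* ... handle the unsolicited ABTS ... */
	return 0;
#endif
}

/* After the fixup: "return 0;" sits after the #endif, so the function
 * returns a value whether or not the option is enabled.
 */
int rcv_unsol_abort_after(void)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* ... handle the unsolicited ABTS ... */
#endif
	return 0;
}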
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_nvmet.c

drivers/scsi/lpfc/lpfc_els.c: Simple merge

drivers/scsi/lpfc/lpfc_nvmet.c:
index b2333b3889c72e47d3530649aab71c5a0b1b0790,f9aafe2a21e2525343e25760a2aa5f06058f32e1..94434e621c335e678ad2aa1c3301b967cb15a210
@@@ -517,8 -558,7 +558,7 @@@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_t
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_iocbq *nvmewqeq;
-       unsigned long iflags;
 -      int rc, id;
 +      int rc;
  
  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
@@@ -794,7 -847,120 +847,120 @@@ voi
  lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                            struct sli4_wcqe_xri_aborted *axri)
  {
-       /* TODO: work in progress */
+       uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+       uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+       struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+       struct lpfc_nodelist *ndlp;
+       unsigned long iflag = 0;
+       int rrq_empty = 0;
+       bool released = false;
+       lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                       "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
+       if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+               return;
+       spin_lock_irqsave(&phba->hbalock, iflag);
+       spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+       list_for_each_entry_safe(ctxp, next_ctxp,
+                                &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+                                list) {
+               if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+                       continue;
+               /* Check if we already received a free context call
+                * and we have completed processing an abort situation.
+                */
+               if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
+                   !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
+                       list_del(&ctxp->list);
+                       released = true;
+               }
+               ctxp->flag &= ~LPFC_NVMET_XBUSY;
+               spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+               rrq_empty = list_empty(&phba->active_rrq_list);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
+               if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+                   (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
+                    ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+                       lpfc_set_rrq_active(phba, ndlp,
+                               ctxp->rqb_buffer->sglq->sli4_lxritag,
+                               rxid, 1);
+                       lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                               "6318 XB aborted %x flg x%x (%x)\n",
+                               ctxp->oxid, ctxp->flag, released);
+               if (released)
+                       lpfc_nvmet_rq_post(phba, ctxp,
+                                          &ctxp->rqb_buffer->hbuf);
+               if (rrq_empty)
+                       lpfc_worker_wake_up(phba);
+               return;
+       }
+       spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
+ }
+
+ int
+ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
+                          struct fc_frame_header *fc_hdr)
+ {
+ #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+       struct nvmefc_tgt_fcp_req *rsp;
+       uint16_t xri;
+       unsigned long iflag = 0;
+       xri = be16_to_cpu(fc_hdr->fh_ox_id);
+       spin_lock_irqsave(&phba->hbalock, iflag);
+       spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+       list_for_each_entry_safe(ctxp, next_ctxp,
+                                &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+                                list) {
+               if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+                       continue;
+               spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               spin_lock_irqsave(&ctxp->ctxlock, iflag);
+               ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+               lpfc_nvmeio_data(phba,
+                       "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+                       xri, smp_processor_id(), 0);
+               lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                               "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
+               rsp = &ctxp->ctx.fcp_req;
+               nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
+               /* Respond with BA_ACC accordingly */
+               lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+               return 0;
+       }
+       spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
+       lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+                        xri, smp_processor_id(), 1);
+       lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                       "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
+       /* Respond with BA_RJT accordingly */
+       lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
 -      return 0;
+ #endif
++      return 0;
  }
  
  void