]> git.baikalelectronics.ru Git - kernel.git/commitdiff
qed: fix possible unpaired spin_{un}lock_bh in _qed_mcp_cmd_and_union()
authorJia He <justin.he@arm.com>
Thu, 15 Jul 2021 08:08:21 +0000 (16:08 +0800)
committerDavid S. Miller <davem@davemloft.net>
Thu, 15 Jul 2021 19:35:38 +0000 (12:35 -0700)
Liajian reported a bug_on hit on a ThunderX2 arm64 server with FastLinQ
QL41000 ethernet controller:
 BUG: scheduling while atomic: kworker/0:4/531/0x00000200
  [qed_probe:488()]hw prepare failed
  kernel BUG at mm/vmalloc.c:2355!
  Internal error: Oops - BUG: 0 [#1] SMP
  CPU: 0 PID: 531 Comm: kworker/0:4 Tainted: G W 5.4.0-77-generic #86-Ubuntu
  pstate: 00400009 (nzcv daif +PAN -UAO)
 Call trace:
  vunmap+0x4c/0x50
  iounmap+0x48/0x58
  qed_free_pci+0x60/0x80 [qed]
  qed_probe+0x35c/0x688 [qed]
  __qede_probe+0x88/0x5c8 [qede]
  qede_probe+0x60/0xe0 [qede]
  local_pci_probe+0x48/0xa0
  work_for_cpu_fn+0x24/0x38
  process_one_work+0x1d0/0x468
  worker_thread+0x238/0x4e0
  kthread+0xf0/0x118
  ret_from_fork+0x10/0x18

In this case, qed_hw_prepare() returns an error due to a hw/fw error, but in
theory the work queue should be in process context instead of interrupt context.

The root cause might be the unpaired spin_{un}lock_bh() in
_qed_mcp_cmd_and_union(), which causes the bottom half to be disabled incorrectly.

Reported-by: Lijian Zhang <Lijian.Zhang@arm.com>
Signed-off-by: Jia He <justin.he@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/qlogic/qed/qed_mcp.c

index 4387292c37e2f9776592aa1143a635f9ad6aff27..79d879a5d66363e740d3a2dfb7ec0f4da67b09c7 100644 (file)
@@ -474,14 +474,18 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 
                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
-               if (!qed_mcp_has_pending_cmd(p_hwfn))
+               if (!qed_mcp_has_pending_cmd(p_hwfn)) {
+                       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
                        break;
+               }
 
                rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
-               if (!rc)
+               if (!rc) {
+                       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
                        break;
-               else if (rc != -EAGAIN)
+               } else if (rc != -EAGAIN) {
                        goto err;
+               }
 
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
@@ -498,6 +502,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                return -EAGAIN;
        }
 
+       spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
        /* Send the mailbox command */
        qed_mcp_reread_offsets(p_hwfn, p_ptt);
        seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
@@ -524,14 +530,18 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 
                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
-               if (p_cmd_elem->b_is_completed)
+               if (p_cmd_elem->b_is_completed) {
+                       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
                        break;
+               }
 
                rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
-               if (!rc)
+               if (!rc) {
+                       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
                        break;
-               else if (rc != -EAGAIN)
+               } else if (rc != -EAGAIN) {
                        goto err;
+               }
 
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        } while (++cnt < max_retries);
@@ -554,6 +564,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                return -EAGAIN;
        }
 
+       spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
        qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);