scsi: target: Add iscsi/cpus_allowed_list in configfs
author    Mingzhe Zou <mingzhe.zou@easystack.cn>
          Tue, 1 Mar 2022 07:55:00 +0000 (15:55 +0800)
committer Martin K. Petersen <martin.petersen@oracle.com>
          Tue, 15 Mar 2022 03:40:36 +0000 (23:40 -0400)
The RX/TX threads for an iSCSI connection can be scheduled to any online
CPU and, once assigned, are not rescheduled.

When other heavily loaded threads end up bound to the same CPU as a
connection's RX/TX threads, iSCSI performance suffers.

Add iscsi/cpus_allowed_list in configfs. The CPU set available to an iSCSI
connection's RX/TX threads is allowed_cpus & online_cpus. Whenever the list
is modified, all RX/TX threads are rescheduled.
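
As a usage illustration (a hedged sketch, not part of the commit; the path
assumes configfs is mounted at /sys/kernel/config and the iSCSI target
module is loaded), a userspace program could restrict all connection RX/TX
threads to CPUs 0-3 by writing a cpulist to the new attribute:

    /* Minimal sketch: write a cpulist string to the new configfs
     * attribute; error handling is abbreviated. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *attr = "/sys/kernel/config/target/iscsi/cpus_allowed_list";
            const char *cpus = "0-3";
            int fd = open(attr, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, cpus, strlen(cpus)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }

Reading the attribute back returns the current mask in the same cpulist
format.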

Link: https://lore.kernel.org/r/20220301075500.14266-1-mingzhe.zou@easystack.cn
Reviewed-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Mingzhe Zou <mingzhe.zou@easystack.cn>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_login.c
include/target/iscsi/iscsi_target_core.h

diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 2c54c5d8412d8e05453f3f5028cb6adf6a36a60e..6fe6a6bab3f465703a82c578ae8365f8d5931be0 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -702,13 +702,19 @@ static int __init iscsi_target_init_module(void)
        if (!iscsit_global->ts_bitmap)
                goto configfs_out;
 
+       if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
+               pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
+               goto bitmap_out;
+       }
+       cpumask_setall(iscsit_global->allowed_cpumask);
+
        lio_qr_cache = kmem_cache_create("lio_qr_cache",
                        sizeof(struct iscsi_queue_req),
                        __alignof__(struct iscsi_queue_req), 0, NULL);
        if (!lio_qr_cache) {
                pr_err("Unable to kmem_cache_create() for"
                                " lio_qr_cache\n");
-               goto bitmap_out;
+               goto cpumask_out;
        }
 
        lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -753,6 +759,8 @@ dr_out:
        kmem_cache_destroy(lio_dr_cache);
 qr_out:
        kmem_cache_destroy(lio_qr_cache);
+cpumask_out:
+       free_cpumask_var(iscsit_global->allowed_cpumask);
 bitmap_out:
        vfree(iscsit_global->ts_bitmap);
 configfs_out:
@@ -782,6 +790,7 @@ static void __exit iscsi_target_cleanup_module(void)
 
        target_unregister_template(&iscsi_ops);
 
+       free_cpumask_var(iscsit_global->allowed_cpumask);
        vfree(iscsit_global->ts_bitmap);
        kfree(iscsit_global);
 }
@@ -3587,6 +3596,11 @@ static int iscsit_send_reject(
 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 {
        int ord, cpu;
+       cpumask_t conn_allowed_cpumask;
+
+       cpumask_and(&conn_allowed_cpumask, iscsit_global->allowed_cpumask,
+                   cpu_online_mask);
+
        /*
         * bitmap_id is assigned from iscsit_global->ts_bitmap from
         * within iscsit_start_kthreads()
@@ -3595,8 +3609,9 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
         * iSCSI connection's RX/TX threads will be scheduled to
         * execute upon.
         */
-       ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
-       for_each_online_cpu(cpu) {
+       cpumask_clear(conn->conn_cpumask);
+       ord = conn->bitmap_id % cpumask_weight(&conn_allowed_cpumask);
+       for_each_cpu(cpu, &conn_allowed_cpumask) {
                if (ord-- == 0) {
                        cpumask_set_cpu(cpu, conn->conn_cpumask);
                        return;
@@ -3609,6 +3624,62 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
        cpumask_setall(conn->conn_cpumask);
 }
 
+static void iscsit_thread_reschedule(struct iscsi_conn *conn)
+{
+       /*
+        * If iscsit_global->allowed_cpumask was modified, reschedule the
+        * iSCSI connection's RX/TX threads and update conn->allowed_cpumask.
+        */
+       if (!cpumask_equal(iscsit_global->allowed_cpumask,
+                          conn->allowed_cpumask)) {
+               iscsit_thread_get_cpumask(conn);
+               conn->conn_tx_reset_cpumask = 1;
+               conn->conn_rx_reset_cpumask = 1;
+               cpumask_copy(conn->allowed_cpumask,
+                            iscsit_global->allowed_cpumask);
+       }
+}
+
+void iscsit_thread_check_cpumask(
+       struct iscsi_conn *conn,
+       struct task_struct *p,
+       int mode)
+{
+       /*
+        * The TX and RX threads may call iscsit_thread_check_cpumask()
+        * at the same time. The RX thread might be faster and return from
+        * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
+        * Then the TX thread sets it back to 1.
+        * The next time the RX thread loops, it sees conn_rx_reset_cpumask
+        * set to 1, calls set_cpus_allowed_ptr() again, and sets it back to 0.
+        */
+       iscsit_thread_reschedule(conn);
+
+       /*
+        * mode == 1 signals iscsi_target_tx_thread() usage.
+        * mode == 0 signals iscsi_target_rx_thread() usage.
+        */
+       if (mode == 1) {
+               if (!conn->conn_tx_reset_cpumask)
+                       return;
+       } else {
+               if (!conn->conn_rx_reset_cpumask)
+                       return;
+       }
+
+       /*
+        * Update the CPU mask for this single kthread so that
+        * both TX and RX kthreads are scheduled to run on the
+        * same CPU.
+        */
+       set_cpus_allowed_ptr(p, conn->conn_cpumask);
+       if (mode == 1)
+               conn->conn_tx_reset_cpumask = 0;
+       else
+               conn->conn_rx_reset_cpumask = 0;
+}
+EXPORT_SYMBOL(iscsit_thread_check_cpumask);
+
 int
 iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
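
To make the selection loop above concrete, here is a hedged userspace model
of iscsit_thread_get_cpumask(): a plain unsigned long stands in for
cpumask_t, and pick_cpu() is a hypothetical name used only for this sketch.

    /* Model of the round-robin pick: walk the set bits of
     * (allowed & online) and stop after bitmap_id % weight steps.
     * CPUs above 63 are out of scope for this toy. */
    #include <stdio.h>

    static int pick_cpu(unsigned long allowed, unsigned long online, int bitmap_id)
    {
            unsigned long mask = allowed & online;
            int weight = __builtin_popcountl(mask);
            int ord, cpu;

            if (!weight)
                    return -1;      /* kernel falls back to cpumask_setall() */

            ord = bitmap_id % weight;
            for (cpu = 0; cpu < 64; cpu++) {
                    if (!(mask & (1UL << cpu)))
                            continue;
                    if (ord-- == 0)
                            return cpu;
            }
            return -1;
    }

    int main(void)
    {
            /* allowed = CPUs 2-5, online = CPUs 0-3: effective set is {2,3} */
            printf("conn 0 -> CPU %d\n", pick_cpu(0x3c, 0x0f, 0)); /* CPU 2 */
            printf("conn 1 -> CPU %d\n", pick_cpu(0x3c, 0x0f, 1)); /* CPU 3 */
            printf("conn 2 -> CPU %d\n", pick_cpu(0x3c, 0x0f, 2)); /* CPU 2 */
            return 0;
    }

Successive connections thus spread across the effective CPU set, wrapping
around once bitmap_id exceeds its weight.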
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 2a9de24a8bbe2d61e9fa36358d7740c41cfd9581..0cedcfe207b56b8b5bae41cdac9a876b2227dc78 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1127,8 +1127,40 @@ static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
 
 CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
 
+static ssize_t lio_target_wwn_cpus_allowed_list_show(
+               struct config_item *item, char *page)
+{
+       return sprintf(page, "%*pbl\n",
+                      cpumask_pr_args(iscsit_global->allowed_cpumask));
+}
+
+static ssize_t lio_target_wwn_cpus_allowed_list_store(
+               struct config_item *item, const char *page, size_t count)
+{
+       int ret;
+       char *orig;
+       cpumask_t new_allowed_cpumask;
+
+       orig = kstrdup(page, GFP_KERNEL);
+       if (!orig)
+               return -ENOMEM;
+
+       cpumask_clear(&new_allowed_cpumask);
+       ret = cpulist_parse(orig, &new_allowed_cpumask);
+
+       kfree(orig);
+       if (ret != 0)
+               return ret;
+
+       cpumask_copy(iscsit_global->allowed_cpumask, &new_allowed_cpumask);
+       return count;
+}
+
+CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
+
 static struct configfs_attribute *lio_target_wwn_attrs[] = {
        &lio_target_wwn_attr_lio_version,
+       &lio_target_wwn_attr_cpus_allowed_list,
        NULL,
 };
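
The store handler above accepts the same "cpulist" syntax used by other
kernel interfaces: comma-separated CPU numbers and inclusive ranges, e.g.
"0-5,7". A hedged userspace sketch of that basic grammar follows (the real
cpulist_parse() also supports extensions such as striding, which this toy
parser omits; parse_cpulist() is a name invented for the sketch):

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "N", "N-M" and comma-separated combinations into a bitmask.
     * CPUs above 63 are not handled here. Returns 0 on success. */
    static int parse_cpulist(const char *s, unsigned long *mask)
    {
            *mask = 0;
            while (*s) {
                    char *end;
                    long a = strtol(s, &end, 10), b = a;

                    if (end == s)
                            return -1;              /* not a number */
                    if (*end == '-') {
                            s = end + 1;
                            b = strtol(s, &end, 10);
                            if (end == s || b < a)
                                    return -1;      /* bad range */
                    }
                    for (long c = a; c <= b; c++)
                            *mask |= 1UL << c;
                    if (*end == ',')
                            end++;
                    s = end;
            }
            return 0;
    }

    int main(void)
    {
            unsigned long mask;

            if (!parse_cpulist("0-2,5", &mask))
                    printf("mask = 0x%lx\n", mask); /* prints mask = 0x27 */
            return 0;
    }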
 
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1a9c50401bdb55b1c1a63552b81187bf517da616..9c01fb86458539301c9b6209732498ae7a2154c1 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1129,8 +1129,15 @@ static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
                goto free_conn_ops;
        }
 
+       if (!zalloc_cpumask_var(&conn->allowed_cpumask, GFP_KERNEL)) {
+               pr_err("Unable to allocate conn->allowed_cpumask\n");
+               goto free_conn_cpumask;
+       }
+
        return conn;
 
+free_conn_cpumask:
+       free_cpumask_var(conn->conn_cpumask);
 free_conn_ops:
        kfree(conn->conn_ops);
 put_transport:
@@ -1142,6 +1149,7 @@ free_conn:
 
 void iscsit_free_conn(struct iscsi_conn *conn)
 {
+       free_cpumask_var(conn->allowed_cpumask);
        free_cpumask_var(conn->conn_cpumask);
        kfree(conn->conn_ops);
        iscsit_put_transport(conn->conn_transport);
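
A note on the allocation pattern above, since it appears twice in this
patch: cpumask_var_t is heap-backed only when CONFIG_CPUMASK_OFFSTACK=y;
otherwise zalloc_cpumask_var() and free_cpumask_var() compile down to near
no-ops around an embedded array. A hedged kernel-side sketch of the idiom
(illustrative, not a standalone program):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static int example_mask_user(void)
    {
            cpumask_var_t mask;

            /* Allocation can only fail in the offstack configuration. */
            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_set_cpu(0, mask);       /* ... use the mask ... */
            free_cpumask_var(mask);
            return 0;
    }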
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 1eccb2ac7d0298c25e80056101aca1313835e396..adc87de0362b542678a677d5429f0aa3b3d7812c 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -580,6 +580,7 @@ struct iscsi_conn {
        struct ahash_request    *conn_tx_hash;
        /* Used for scheduling TX and RX connection kthreads */
        cpumask_var_t           conn_cpumask;
+       cpumask_var_t           allowed_cpumask;
        unsigned int            conn_rx_reset_cpumask:1;
        unsigned int            conn_tx_reset_cpumask:1;
        /* list_head of struct iscsi_cmd for this connection */
@@ -878,6 +879,7 @@ struct iscsit_global {
        /* Thread Set bitmap pointer */
        unsigned long           *ts_bitmap;
        spinlock_t              ts_bitmap_lock;
+       cpumask_var_t           allowed_cpumask;
        /* Used for iSCSI discovery session authentication */
        struct iscsi_node_acl   discovery_acl;
        struct iscsi_portal_group       *discovery_tpg;
@@ -898,29 +900,8 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
 
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 
-static inline void iscsit_thread_check_cpumask(
-       struct iscsi_conn *conn,
-       struct task_struct *p,
-       int mode)
-{
-       /*
-        * mode == 1 signals iscsi_target_tx_thread() usage.
-        * mode == 0 signals iscsi_target_rx_thread() usage.
-        */
-       if (mode == 1) {
-               if (!conn->conn_tx_reset_cpumask)
-                       return;
-               conn->conn_tx_reset_cpumask = 0;
-       } else {
-               if (!conn->conn_rx_reset_cpumask)
-                       return;
-               conn->conn_rx_reset_cpumask = 0;
-       }
-       /*
-        * Update the CPU mask for this single kthread so that
-        * both TX and RX kthreads are scheduled to run on the
-        * same CPU.
-        */
-       set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
+extern void iscsit_thread_check_cpumask(struct iscsi_conn *conn,
+                                       struct task_struct *p,
+                                       int mode);
+
 #endif /* ISCSI_TARGET_CORE_H */
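
For context on how the now-exported helper is consumed: both connection
kthreads poll it at the top of their main loops, so a configfs update takes
effect on the next iteration without restarting the connection. A
paraphrased sketch of the TX side (illustrative, not a verbatim quote from
iscsi_target.c; the real loop body is elided):

    #include <linux/kthread.h>

    int iscsi_target_tx_thread(void *arg)
    {
            struct iscsi_conn *conn = arg;

            while (!kthread_should_stop()) {
                    /* mode == 1 selects the TX branch of the helper */
                    iscsit_thread_check_cpumask(conn, current, 1);
                    /* ... wait for and transmit queued iSCSI responses ... */
            }
            return 0;
    }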