scsi: smartpqi: Speed up RAID 10 sequential reads
author Mike McGowen <Mike.McGowen@microchip.com>
Tue, 1 Feb 2022 21:48:48 +0000 (15:48 -0600)
committer Martin K. Petersen <martin.petersen@oracle.com>
Tue, 8 Feb 2022 04:38:35 +0000 (23:38 -0500)
Use all data disks for sequential read operations.

Testing discovered inconsistent performance on RAID 10 volumes when
performing 256K sequential reads. The driver was using only a single
tracker per volume to decide which physical drive to send an AIO read
request to.

Change the single tracker (next_bypass_group) to an array of trackers,
sized by the maximum number of data disks in a row of the RAID map and
indexed by data disk position, so that each position rotates through
the mirror groups independently.
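
As an illustration of the new scheme, here is a minimal userspace
sketch of the round-robin selection (not driver code: toy_device and
pick_map_index are hypothetical stand-ins for struct pqi_scsi_dev and
the RAID 10 read branch of pqi_raid_bypass_submit_scsi_cmd):

#include <stdio.h>

#define RAID_MAP_MAX_DATA_DISKS_PER_ROW 128

struct toy_device {
        unsigned int layout_map_count;          /* number of mirror groups */
        unsigned int data_disks_per_row;        /* data disks per RAID map row */
        unsigned int next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
};

/*
 * map_index identifies a data disk within the first mirror group; the
 * per-index tracker picks which group services this read, and the
 * returned value is offset into that group's copy of the row.
 */
static unsigned int pick_map_index(struct toy_device *dev,
        unsigned int map_index)
{
        unsigned int group = dev->next_bypass_group[map_index];
        unsigned int next_group = group + 1;

        if (next_group >= dev->layout_map_count)
                next_group = 0;
        dev->next_bypass_group[map_index] = next_group; /* advance this position only */

        return map_index + group * dev->data_disks_per_row;
}

int main(void)
{
        struct toy_device dev = {
                .layout_map_count = 2,  /* RAID 10: two mirror groups */
                .data_disks_per_row = 4,
        };
        int i;

        for (i = 0; i < 4; i++)
                printf("read %d -> map entry %u\n", i, pick_map_index(&dev, 0));

        return 0;
}

Running this prints map entries 0, 4, 0, 4: successive reads on data
disk position 0 alternate between the two mirror groups, while the
trackers for the other positions are left untouched.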

Link: https://lore.kernel.org/r/164375212842.440833.6733971458765002128.stgit@brunhilda.pdev.net
Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
Reviewed-by: Mike McGowen <mike.mcgowen@microchip.com>
Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
Reviewed-by: Scott Teel <scott.teel@microchip.com>
Signed-off-by: Mike McGowen <Mike.McGowen@microchip.com>
Signed-off-by: Don Brace <don.brace@microchip.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/smartpqi/smartpqi.h
drivers/scsi/smartpqi/smartpqi_init.c

diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 4f6e48854c6622835986b0611f134f975d4a022a..826c4001bac2dcc093da195da570a32effcb6f33 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -918,7 +918,8 @@ union pqi_reset_register {
 #define PQI_MAX_TRANSFER_SIZE                  (1024U * 1024U)
 #define PQI_MAX_TRANSFER_SIZE_KDUMP            (512 * 1024U)
 
-#define RAID_MAP_MAX_ENTRIES           1024
+#define RAID_MAP_MAX_ENTRIES                   1024
+#define RAID_MAP_MAX_DATA_DISKS_PER_ROW                128
 
 #define PQI_PHYSICAL_DEVICE_BUS                0
 #define PQI_RAID_VOLUME_BUS            1
@@ -1125,7 +1126,7 @@ struct pqi_scsi_dev {
        u8      ncq_prio_support;
        bool    raid_bypass_configured; /* RAID bypass configured */
        bool    raid_bypass_enabled;    /* RAID bypass enabled */
-       u32     next_bypass_group;
+       u32     next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
        struct raid_map *raid_map;      /* RAID bypass map */
        u32     max_transfer_encrypted;
 
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8bd4de6306db12670827ee4e93a4347db383e069..18c695202c52d399b121dba976d2b4653cb2bc3b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -2058,7 +2058,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
                sizeof(existing_device->box));
        memcpy(existing_device->phys_connector, new_device->phys_connector,
                sizeof(existing_device->phys_connector));
-       existing_device->next_bypass_group = 0;
+       memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
        kfree(existing_device->raid_map);
        existing_device->raid_map = new_device->raid_map;
        existing_device->raid_bypass_configured =
@@ -2963,11 +2963,11 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
                if (rmd.is_write) {
                        pqi_calc_aio_r1_nexus(raid_map, &rmd);
                } else {
-                       group = device->next_bypass_group;
+                       group = device->next_bypass_group[rmd.map_index];
                        next_bypass_group = group + 1;
                        if (next_bypass_group >= rmd.layout_map_count)
                                next_bypass_group = 0;
-                       device->next_bypass_group = next_bypass_group;
+                       device->next_bypass_group[rmd.map_index] = next_bypass_group;
                        rmd.map_index += group * rmd.data_disks_per_row;
                }
        } else if ((device->raid_level == SA_RAID_5 ||
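
The design choice is visible in the last hunk: rmd.map_index, before
the final addition, selects a data disk position within the first
mirror group, so it doubles as the tracker index, and group *
rmd.data_disks_per_row then offsets the index into the chosen group's
copy of the row. Each position thereby rotates through the mirror
groups on its own rather than all reads on the volume sharing one
counter, and pqi_scsi_update_device() zeroes the whole tracker array
whenever a device's RAID map is replaced during rescan.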