git.baikalelectronics.ru Git - kernel.git/commitdiff
mac80211: minstrel_ht: reduce the need to sample slower rates
authorFelix Fietkau <nbd@nbd.name>
Wed, 27 Jan 2021 05:57:32 +0000 (06:57 +0100)
committerJohannes Berg <johannes.berg@intel.com>
Fri, 12 Feb 2021 07:57:24 +0000 (08:57 +0100)
In order to more gracefully be able to fall back to lower rates without too
much throughput fluctuation, initialize all untested rates below tested ones
to the maximum probability of higher rates.
Usually this leads to untested lower rates getting initialized with a
probability value of 100%, making them better candidates for fallback without
having to rely on random probing.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://lore.kernel.org/r/20210127055735.78599-3-nbd@nbd.name
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rc80211_minstrel_ht.h

index 7846782840a971660c38ed9b5b17f2ad20ed4f5e..4d4eb4aa46bddc58a3e72d8c64017eea0579ee0b 100644 (file)
@@ -791,14 +791,11 @@ minstrel_ht_calc_rate_stats(struct minstrel_priv *mp,
        unsigned int cur_prob;
 
        if (unlikely(mrs->attempts > 0)) {
-               mrs->sample_skipped = 0;
                cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
                minstrel_filter_avg_add(&mrs->prob_avg,
                                        &mrs->prob_avg_1, cur_prob);
                mrs->att_hist += mrs->attempts;
                mrs->succ_hist += mrs->success;
-       } else {
-               mrs->sample_skipped++;
        }
 
        mrs->last_success = mrs->success;
@@ -851,7 +848,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                mi->ampdu_packets = 0;
        }
 
-       mi->sample_slow = 0;
        mi->sample_count = 0;
 
        if (mi->supported[MINSTREL_CCK_GROUP])
@@ -882,6 +878,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
        /* Find best rate sets within all MCS groups*/
        for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
                u16 *tp_rate = tmp_mcs_tp_rate;
+               u16 last_prob = 0;
 
                mg = &mi->groups[group];
                if (!mi->supported[group])
@@ -896,7 +893,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                if (group == MINSTREL_CCK_GROUP && ht_supported)
                        tp_rate = tmp_legacy_tp_rate;
 
-               for (i = 0; i < MCS_GROUP_RATES; i++) {
+               for (i = MCS_GROUP_RATES - 1; i >= 0; i--) {
                        if (!(mi->supported[group] & BIT(i)))
                                continue;
 
@@ -905,6 +902,11 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                        mrs = &mg->rates[i];
                        mrs->retry_updated = false;
                        minstrel_ht_calc_rate_stats(mp, mrs);
+
+                       if (mrs->att_hist)
+                               last_prob = max(last_prob, mrs->prob_avg);
+                       else
+                               mrs->prob_avg = max(last_prob, mrs->prob_avg);
                        cur_prob = mrs->prob_avg;
 
                        if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
@@ -1469,13 +1471,9 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
        if (sample_dur >= minstrel_get_duration(tp_rate2) &&
            (cur_max_tp_streams - 1 <
             minstrel_mcs_groups[sample_group].streams ||
-            sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
-               if (mrs->sample_skipped < 20)
+            sample_dur >= minstrel_get_duration(mi->max_prob_rate)))
                        return -1;
 
-               if (mi->sample_slow++ > 2)
-                       return -1;
-       }
        mi->sample_tries--;
 
        return sample_idx;
index 5912f7dc526785594ff450fbd19394e894a4246c..ebb2b88f44d9a09899f8f9fa57e23b64fd653483 100644 (file)
@@ -123,7 +123,6 @@ struct minstrel_rate_stats {
        u8 retry_count;
        u8 retry_count_rtscts;
 
-       u8 sample_skipped;
        bool retry_updated;
 };
 
@@ -179,7 +178,6 @@ struct minstrel_ht_sta {
        u8 sample_wait;
        u8 sample_tries;
        u8 sample_count;
-       u8 sample_slow;
 
        enum minstrel_sample_mode sample_mode;
        u16 sample_rate;