cachefiles: make on-demand request distribution fairer
author    Xin Yin <yinxin.x@bytedance.com>
Thu, 25 Aug 2022 02:09:45 +0000 (10:09 +0800)
committer David Howells <dhowells@redhat.com>
Wed, 31 Aug 2022 15:41:10 +0000 (16:41 +0100)
Currently, enqueuing and dequeuing of on-demand requests both start from
idx 0, which makes request distribution unfair. Under heavy concurrent
I/O, requests stored at higher idx may starve.

Search for requests cyclically in cachefiles_ondemand_daemon_read() to
make the distribution fairer.
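As a rough illustration (not part of the kernel code), the sketch below shows
the same cyclic-search idea in plain user-space C: scan from the remembered
position to the end, wrap around to the start if nothing was found, and
remember the index just past the hit for the next call. The names
pick_request, slot, NR_SLOTS and the is_new flag are made up for the example;
the real code does this over the reqs xarray with xas_find_marked() and the
CACHEFILES_REQ_NEW mark.

	#include <stdio.h>
	#include <stdbool.h>

	#define NR_SLOTS 8

	struct slot {
		bool is_new;		/* analogous to the CACHEFILES_REQ_NEW mark */
	};

	/*
	 * Search for an unprocessed slot starting at *next_id, wrapping to the
	 * beginning if nothing is found in [*next_id, NR_SLOTS).  On success,
	 * clear the flag and advance *next_id past the hit so the next call
	 * continues from there instead of restarting at 0.
	 */
	static int pick_request(struct slot *slots, unsigned long *next_id)
	{
		unsigned long i;

		for (i = *next_id; i < NR_SLOTS; i++)
			if (slots[i].is_new)
				goto found;

		for (i = 0; i < *next_id; i++)
			if (slots[i].is_new)
				goto found;

		return -1;		/* nothing pending */

	found:
		slots[i].is_new = false;
		*next_id = i + 1;
		return (int)i;
	}

	int main(void)
	{
		struct slot slots[NR_SLOTS] = { 0 };
		unsigned long next_id = 0;

		/* Two pending requests, one low and one high. */
		slots[1].is_new = true;
		slots[6].is_new = true;

		printf("picked %d\n", pick_request(slots, &next_id));	/* 1 */

		/* A new low-index request arrives before the next read. */
		slots[0].is_new = true;

		/*
		 * Always restarting from 0 would pick slot 0 again and starve
		 * slot 6; the cyclic search picks 6 first, then wraps to 0.
		 */
		printf("picked %d\n", pick_request(slots, &next_id));	/* 6 */
		printf("picked %d\n", pick_request(slots, &next_id));	/* 0 */
		return 0;
	}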

Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
Reported-by: Yongqing Li <liyongqing@bytedance.com>
Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220817065200.11543-1-yinxin.x@bytedance.com/
Link: https://lore.kernel.org/r/20220825020945.2293-1-yinxin.x@bytedance.com/
fs/cachefiles/internal.h
fs/cachefiles/ondemand.c

index 6cba2c6de2f963d762da94aea13f37dedd33349a..2ad58c465208480e42106393ae01fd2f317389ee 100644 (file)
@@ -111,6 +111,7 @@ struct cachefiles_cache {
        char                            *tag;           /* cache binding tag */
        refcount_t                      unbind_pincount;/* refcount to do daemon unbind */
        struct xarray                   reqs;           /* xarray of pending on-demand requests */
+       unsigned long                   req_id_next;
        struct xarray                   ondemand_ids;   /* xarray for ondemand_id allocation */
        u32                             ondemand_id_next;
 };
index 7e1586bd5cf34e644deb5dd76a238db1f082bfdc..0254ed39f68ceb75dcb2ec840af88b136ac21fe3 100644 (file)
@@ -242,14 +242,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
        unsigned long id = 0;
        size_t n;
        int ret = 0;
-       XA_STATE(xas, &cache->reqs, 0);
+       XA_STATE(xas, &cache->reqs, cache->req_id_next);
 
        /*
-        * Search for a request that has not ever been processed, to prevent
-        * requests from being processed repeatedly.
+        * Cyclically search for a request that has not ever been processed,
+        * to prevent requests from being processed repeatedly, and make
+        * request distribution fair.
         */
        xa_lock(&cache->reqs);
        req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+       if (!req && cache->req_id_next > 0) {
+               xas_set(&xas, 0);
+               req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+       }
        if (!req) {
                xa_unlock(&cache->reqs);
                return 0;
@@ -264,6 +269,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
        }
 
        xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+       cache->req_id_next = xas.xa_index + 1;
        xa_unlock(&cache->reqs);
 
        id = xas.xa_index;
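
The wrapped second search is bounded at req_id_next - 1, so a single read
call never examines the same index twice; advancing req_id_next to
xas.xa_index + 1 after a hit is what turns successive reads into a
round-robin walk over the pending requests instead of always restarting
at index 0.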