git.baikalelectronics.ru Git - kernel.git/commitdiff
kyber: avoid q->disk dereferences in trace points
author Christoph Hellwig <hch@lst.de>
Tue, 12 Oct 2021 09:33:01 +0000 (11:33 +0200)
committer Jens Axboe <axboe@kernel.dk>
Sat, 16 Oct 2021 03:02:57 +0000 (21:02 -0600)
q->disk becomes invalid after the gendisk is removed.  Work around this
by caching the dev_t for the tracepoints.  The real fix would be to
properly tear down the I/O schedulers with the gendisk, but that is
a much more invasive change.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211012093301.GA27795@lst.de
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/kyber-iosched.c
include/trace/events/kyber.h

index 15a8be57203d6420df3d73a37736adda1a168019..a0ffbabfac2c61c12139163b595694bb0d8f6493 100644 (file)
@@ -151,6 +151,7 @@ struct kyber_ctx_queue {
 
 struct kyber_queue_data {
        struct request_queue *q;
+       dev_t dev;
 
        /*
         * Each scheduling domain has a limited number of in-flight requests
@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd,
        }
        memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
 
-       trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+       trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
                            kyber_latency_type_names[type], percentile,
                            bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
 
@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
        depth = clamp(depth, 1U, kyber_depth[sched_domain]);
        if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
                sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
-               trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+               trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
                                   depth);
        }
 }
@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
                goto err;
 
        kqd->q = q;
+       kqd->dev = disk_devt(q->disk);
 
        kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
                                            GFP_KERNEL | __GFP_ZERO);
@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
-                       trace_kyber_throttled(kqd->q,
+                       trace_kyber_throttled(kqd->dev,
                                              kyber_domain_names[khd->cur_domain]);
                }
        } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
-                       trace_kyber_throttled(kqd->q,
+                       trace_kyber_throttled(kqd->dev,
                                              kyber_domain_names[khd->cur_domain]);
                }
        }
index 491098a0d8ed94fd691ca8e494ef7f0bba6c055e..bf7533f171ff9c6542203d31599956227c11cc66 100644 (file)
 
 TRACE_EVENT(kyber_latency,
 
-       TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+       TP_PROTO(dev_t dev, const char *domain, const char *type,
                 unsigned int percentile, unsigned int numerator,
                 unsigned int denominator, unsigned int samples),
 
-       TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+       TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                             )
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(q->disk);
+               __entry->dev            = dev;
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                strlcpy(__entry->type, type, sizeof(__entry->type));
                __entry->percentile     = percentile;
@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
 
 TRACE_EVENT(kyber_adjust,
 
-       TP_PROTO(struct request_queue *q, const char *domain,
-                unsigned int depth),
+       TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
 
-       TP_ARGS(q, domain, depth),
+       TP_ARGS(dev, domain, depth),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(q->disk);
+               __entry->dev            = dev;
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                __entry->depth          = depth;
        ),
@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
 
 TRACE_EVENT(kyber_throttled,
 
-       TP_PROTO(struct request_queue *q, const char *domain),
+       TP_PROTO(dev_t dev, const char *domain),
 
-       TP_ARGS(q, domain),
+       TP_ARGS(dev, domain),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(q->disk);
+               __entry->dev            = dev;
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
        ),