net: sched: Introduce helpers for qevent blocks
Author:     Petr Machata <petrm@mellanox.com>
AuthorDate: Fri, 26 Jun 2020 22:45:26 +0000 (01:45 +0300)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Tue, 30 Jun 2020 00:08:28 +0000 (17:08 -0700)
Qevents are attach points for TC blocks, where filters can be attached that
are executed when "interesting events" take place in a qdisc. The data to
keep and the functions to invoke to maintain a qevent are largely the same
from one qevent to another. Therefore introduce sched-wide helpers for
qevent management.
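
For illustration, a minimal sketch of how a qdisc might embed a qevent and
bind it at init time. This is not part of the patch; the qdisc name, netlink
attributes, policy, and binder type are hypothetical placeholders:

	/* Hypothetical qdisc private data embedding one qevent. */
	struct foo_sched_data {
		struct tcf_qevent qe_early_drop;
		/* ... other qdisc state ... */
	};

	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
			    struct netlink_ext_ack *extack)
	{
		struct foo_sched_data *q = qdisc_priv(sch);
		struct nlattr *tb[TCA_FOO_MAX + 1];
		int err;

		err = nla_parse_nested(tb, TCA_FOO_MAX, opt, foo_policy, extack);
		if (err < 0)
			return err;

		/* Bind a block to the qevent if the user gave a block index;
		 * a missing attribute leaves the qevent unbound (a no-op).
		 */
		return tcf_qevent_init(&q->qe_early_drop, sch,
				       FLOW_BLOCK_BINDER_TYPE_FOO_EARLY_DROP,
				       tb[TCA_FOO_EARLY_DROP_BLOCK], extack);
	}

	static void foo_destroy(struct Qdisc *sch)
	{
		struct foo_sched_data *q = qdisc_priv(sch);

		tcf_qevent_destroy(&q->qe_early_drop, sch);
	}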

Currently, similarly to the ingress and egress blocks of the clsact
pseudo-qdisc, block attachment cannot be changed after the qdisc is created.
To enforce that, add a helper, tcf_qevent_validate_change(), which verifies
that the block index attribute is either absent or, if present, that its
value matches the currently attached block (i.e. there is no material
change).
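
On the change path, the same hypothetical qdisc could then bounce any
attempt to rebind the block, a sketch under the same assumptions as above:

	static int foo_change(struct Qdisc *sch, struct nlattr *opt,
			      struct netlink_ext_ack *extack)
	{
		struct foo_sched_data *q = qdisc_priv(sch);
		struct nlattr *tb[TCA_FOO_MAX + 1];
		int err;

		err = nla_parse_nested(tb, TCA_FOO_MAX, opt, foo_policy, extack);
		if (err < 0)
			return err;

		/* An absent attribute, or one matching the bound block,
		 * passes; a different block index fails with -EINVAL.
		 */
		err = tcf_qevent_validate_change(&q->qe_early_drop,
						 tb[TCA_FOO_EARLY_DROP_BLOCK],
						 extack);
		if (err)
			return err;

		/* ... apply the remaining configuration ... */
		return 0;
	}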

The function tcf_qevent_handle() should be invoked when the qdisc hits the
"interesting event" corresponding to a block. This function releases the
root lock for the duration of executing the attached filters, to allow
packets generated through user actions (notably mirred) to be reinserted
into the same qdisc tree.
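
A sketch of an enqueue path that runs the qevent's filters follows.
foo_should_early_drop() is a made-up predicate, and the root_lock parameter
assumes the Qdisc_ops.enqueue prototype that this series extends with the
root lock; combining the returned flag with NET_XMIT_CN mirrors how a
drop-style qevent caller would report congestion:

	static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       spinlock_t *root_lock, struct sk_buff **to_free)
	{
		struct foo_sched_data *q = qdisc_priv(sch);
		int ret;

		if (foo_should_early_drop(q, skb)) {
			/* Run the attached filters. A NULL return means the
			 * packet was dropped, stolen or redirected; *ret then
			 * carries the __NET_XMIT_* flag to combine with the
			 * return code.
			 */
			skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb,
						root_lock, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		}

		/* ... regular enqueue processing of skb ... */
		return NET_XMIT_SUCCESS;
	}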

Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/pkt_cls.h
net/sched/cls_api.c

diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ff017e5b3ea27a9342fc7d5625087926742ce900..690a7f49c8f907993314a4d36c548b3afa9a908e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -32,6 +32,12 @@ struct tcf_block_ext_info {
        u32 block_index;
 };
 
+struct tcf_qevent {
+       struct tcf_block        *block;
+       struct tcf_block_ext_info info;
+       struct tcf_proto __rcu *filter_chain;
+};
+
 struct tcf_block_cb;
 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
 
@@ -553,6 +559,49 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
                          void *cb_priv, u32 *flags, unsigned int *in_hw_count);
 unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
 
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret);
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                                 enum flow_block_binder_type binder_type,
+                                 struct nlattr *block_index_attr,
+                                 struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                                            struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+{
+       return skb;
+}
+
+static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+       return 0;
+}
+#endif
+
 struct tc_cls_u32_knode {
        struct tcf_exts *exts;
        struct tcf_result *res;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5bfa6b985bb89a3dc80f88f36fe1c39d5bfafd2c..1b14d5f57e7f601b2e66affb2a90cd02e0a2516d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3748,6 +3748,125 @@ unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
 }
 EXPORT_SYMBOL(tcf_exts_num_actions);
 
+#ifdef CONFIG_NET_CLS_ACT
+static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
+                                       u32 *p_block_index,
+                                       struct netlink_ext_ack *extack)
+{
+       *p_block_index = nla_get_u32(block_index_attr);
+       if (!*p_block_index) {
+               NL_SET_ERR_MSG(extack, "Block number may not be zero");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+       if (err)
+               return err;
+
+       if (!block_index)
+               return 0;
+
+       qe->info.binder_type = binder_type;
+       qe->info.chain_head_change = tcf_chain_head_change_dflt;
+       qe->info.chain_head_change_priv = &qe->filter_chain;
+       qe->info.block_index = block_index;
+
+       return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
+}
+EXPORT_SYMBOL(tcf_qevent_init);
+
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+       if (qe->info.block_index)
+               tcf_block_put_ext(qe->block, sch, &qe->info);
+}
+EXPORT_SYMBOL(tcf_qevent_destroy);
+
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+       if (err)
+               return err;
+
+       /* Bounce newly-configured block or change in block. */
+       if (block_index != qe->info.block_index) {
+               NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(tcf_qevent_validate_change);
+
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+{
+       struct tcf_result cl_res;
+       struct tcf_proto *fl;
+
+       if (!qe->info.block_index)
+               return skb;
+
+       fl = rcu_dereference_bh(qe->filter_chain);
+
+       if (root_lock)
+               spin_unlock(root_lock);
+
+       switch (tcf_classify(skb, fl, &cl_res, false)) {
+       case TC_ACT_SHOT:
+               qdisc_qstats_drop(sch);
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_BYPASS;
+               return NULL;
+       case TC_ACT_STOLEN:
+       case TC_ACT_QUEUED:
+       case TC_ACT_TRAP:
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_STOLEN;
+               return NULL;
+       case TC_ACT_REDIRECT:
+               skb_do_redirect(skb);
+               *ret = __NET_XMIT_STOLEN;
+               return NULL;
+       }
+
+       if (root_lock)
+               spin_lock(root_lock);
+
+       return skb;
+}
+EXPORT_SYMBOL(tcf_qevent_handle);
+
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+       if (!qe->info.block_index)
+               return 0;
+       return nla_put_u32(skb, attr_name, qe->info.block_index);
+}
+EXPORT_SYMBOL(tcf_qevent_dump);
+#endif
+
 static __net_init int tcf_net_init(struct net *net)
 {
        struct tcf_net *tn = net_generic(net, tcf_net_id);