Commit 6e8778f8 authored by David S. Miller

Merge branch 'sched-fixes'



Victor Nogueira says:

====================
net: sched: Fixes for classifiers

Four different classifiers (bpf, u32, matchall, and flower) are
calling tcf_bind_filter in their callbacks, but aren't undoing it by
calling tcf_unbind_filter if there was an error after binding.

This patch set fixes these issues by calling tcf_unbind_filter in
such cases.

This set also undoes a refcount decrement in cls_u32 when an update
fails under specific conditions which are described in patch #3.
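
For illustration only (this sketch is not part of the series; the
names struct my_filter, TCA_MY_CLASSID, and my_cls_offload are
hypothetical), the pattern the fixes follow is: remember whether
tcf_bind_filter was called, and undo it with tcf_unbind_filter on any
later failure in the ->change() callback.

/* Hypothetical classifier ->change() excerpt illustrating the pattern. */
static int my_cls_change(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct nlattr **tb,
			 struct my_filter *f)
{
	bool bound_to_filter = false;
	int err;

	if (tb[TCA_MY_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_MY_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
		bound_to_filter = true;
	}

	err = my_cls_offload(tp, f);	/* may fail after binding */
	if (err)
		goto errout;

	return 0;

errout:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &f->res);	/* undo the bind */
	return err;
}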

v1 -> v2:
* Remove blank line after fixes tag
* Fix reverse xmas tree issues pointed out by Simon

v2 -> v3:
* Inline the functions cls_bpf_set_parms and fl_set_parms to avoid
  adding yet another parameter (and yet another return value) to them.
* Remove similar fixes for u32 and matchall, which will be sent soon,
  once we find a way to do the fixes without adding a return parameter
  to their set_parms functions.

v3 -> v4:
* Inline mall_set_parms to avoid adding yet another parameter.
* Remove set_flags parameter from u32_set_parms and create a separate
  function for calling tcf_bind_filter and tcf_unbind_filter in case of
  failure.
* Change cover letter title to also encompass refcnt fix for u32

v4 -> v5:
* Change the tag back to net
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0dd1805f ac177a33
net/sched/cls_bpf.c  +47 −52
@@ -406,56 +406,6 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, u32 flags,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
@@ -463,9 +413,12 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	bool is_bpf, is_ebpf, have_exts = false;
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	bool bound_to_filter = false;
	struct cls_bpf_prog *prog;
	u32 gen_flags = 0;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
@@ -504,11 +457,51 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
				extack);
	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto errout_idr;
	}

	ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
				flags, extack);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout_idr;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout_idr;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
		bound_to_filter = true;
	}

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;
@@ -530,6 +523,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
	return 0;

errout_parms:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &prog->res);
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
net/sched/cls_flower.c  +47 −52
@@ -2173,53 +2173,6 @@ static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
	return mask->meta.l2_miss;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est,
			struct fl_flow_tmplt *tmplt,
			u32 flags, u32 fl_flags,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_lock();
		tcf_bind_filter(tp, &f->res, base);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_unlock();
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	/* Enable tc skb extension if filter matches on data extracted from
	 * this extension.
	 */
	if (fl_needs_tc_skb_ext(&mask->key)) {
		f->needs_tc_skb_ext = 1;
		tc_skb_ext_tc_enable();
	}

	return 0;
}

static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
@@ -2251,6 +2204,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
	struct cls_fl_head *head = fl_head_dereference(tp);
	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
	struct cls_fl_filter *fold = *arg;
	bool bound_to_filter = false;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
@@ -2335,15 +2289,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
	if (err < 0)
		goto errout_idr;

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
			   tp->chain->tmplt_priv, flags, fnew->flags,
	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
				   &fnew->exts, flags, fnew->flags,
				   extack);
	if (err)
	if (err < 0)
		goto errout_idr;

	if (tb[TCA_FLOWER_CLASSID]) {
		fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_lock();
		tcf_bind_filter(tp, &fnew->res, base);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_unlock();
		bound_to_filter = true;
	}

	err = fl_set_key(net, tb, &fnew->key, &mask->key, extack);
	if (err)
		goto unbind_filter;

	fl_mask_update_range(mask);
	fl_set_masked_key(&fnew->mkey, &fnew->key, mask);

	if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		err = -EINVAL;
		goto unbind_filter;
	}

	/* Enable tc skb extension if filter matches on data extracted from
	 * this extension.
	 */
	if (fl_needs_tc_skb_ext(&mask->key)) {
		fnew->needs_tc_skb_ext = 1;
		tc_skb_ext_tc_enable();
	}

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout_idr;
		goto unbind_filter;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
@@ -2434,6 +2419,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);

unbind_filter:
	if (bound_to_filter) {
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_lock();
		tcf_unbind_filter(tp, &fnew->res);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_unlock();
	}

errout_idr:
	if (!fold)
		idr_remove(&head->handle_idr, fnew->handle);
net/sched/cls_matchall.c  +12 −23
@@ -159,26 +159,6 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, u32 flags, u32 fl_flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
@@ -187,6 +167,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	bool bound_to_filter = false;
	struct cls_mall_head *new;
	u32 userflags = 0;
	int err;
@@ -226,11 +207,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
			     flags, new->flags, extack);
	if (err)
	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
				   &new->exts, flags, new->flags, extack);
	if (err < 0)
		goto err_set_parms;

	if (tb[TCA_MATCHALL_CLASSID]) {
		new->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &new->res, base);
		bound_to_filter = true;
	}

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
@@ -246,6 +233,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
	return 0;

err_replace_hw_filter:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &new->res);
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
net/sched/cls_u32.c  +37 −11
@@ -712,8 +712,23 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			      struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID])
		tcf_unbind_filter(tp, &n->res);
}

static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			    unsigned long base, struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}
}

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, u32 flags, u32 fl_flags,
			 struct netlink_ext_ack *extack)
@@ -760,10 +775,6 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

	if (ifindex >= 0)
		n->ifindex = ifindex;
@@ -903,17 +914,27 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base, new, tb,
				    tca[TCA_RATE], flags, new->flags,
				    extack);
		err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE],
				    flags, new->flags, extack);

		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		u32_bind_filter(tp, new, base, tb);

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_unbind_filter(tp, new, tb);

			if (tb[TCA_U32_LINK]) {
				struct tc_u_hnode *ht_old;

				ht_old = rtnl_dereference(n->ht_down);
				if (ht_old)
					ht_old->refcnt++;
			}
			__u32_destroy_key(new);
			return err;
		}
@@ -1074,15 +1095,18 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
	}
#endif

	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
	err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE],
			    flags, n->flags, extack);

	u32_bind_filter(tp, n, base, tb);

	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;
			goto errunbind;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -1100,7 +1124,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
		return 0;
	}

errhw:
errunbind:
	u32_unbind_filter(tp, n, tb);

#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif