Commit b7e4ba9a authored by David S. Miller
Browse files


Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Switch to RCU in x_tables to fix possible NULL pointer dereference,
   from Subash Abhinov Kasiviswanathan.

2) Fix netlink dump of dynset timeouts later than 23 days.

3) Add comment for the indirect serialization of the nft commit mutex
   with rtnl_mutex.

4) Remove bogus check for confirmed conntrack when matching on the
   conntrack ID, from Brett Mastbergen.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 88287773 2d94b20b
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -227,7 +227,7 @@ struct xt_table {
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;
	struct xt_table_info __rcu *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;
@@ -448,6 +448,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)

struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);

struct xt_table_info
*xt_table_get_private_protected(const struct xt_table *table);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

+4 −0
Original line number Diff line number Diff line
@@ -1524,4 +1524,8 @@ void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);

void nf_tables_trans_destroy_flush_work(void);

int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
__be64 nf_jiffies64_to_msecs(u64 input);

#endif /* _NET_NF_TABLES_H */
+7 −7
Original line number Diff line number Diff line
@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	private = rcu_access_pointer(table->private);
	cpu     = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	struct xt_table_info *private = xt_table_get_private_protected(table);
	int ret = 0;
	void *loc_cpu_entry;

@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
	if (!IS_ERR(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
		const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		const struct xt_table_info *private = xt_table_get_private_protected(t);

		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
@@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
	}

	local_bh_disable();
	private = t->private;
	private = xt_table_get_private_protected(t);
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
@@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
				       void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);
	void __user *pos;
	unsigned int size;
	int ret = 0;
+7 −7
Original line number Diff line number Diff line
@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
	WARN_ON(!(table->valid_hooks & (1 << hook)));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	private = rcu_access_pointer(table->private);
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);
	int ret = 0;
	const void *loc_cpu_entry;

@@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
	t = xt_request_find_table_lock(net, AF_INET, name);
	if (!IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
		const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

@@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		const struct xt_table_info *private = xt_table_get_private_protected(t);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
@@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
	}

	local_bh_disable();
	private = t->private;
	private = xt_table_get_private_protected(t);
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
@@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);
	void __user *pos;
	unsigned int size;
	int ret = 0;
+7 −7
Original line number Diff line number Diff line
@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	private = rcu_access_pointer(table->private);
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);
	int ret = 0;
	const void *loc_cpu_entry;

@@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
	t = xt_request_find_table_lock(net, AF_INET6, name);
	if (!IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
		const struct xt_table_info *private = xt_table_get_private_protected(t);
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

@@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info *private = xt_table_get_private_protected(t);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
@@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
	}

	local_bh_disable();
	private = t->private;
	private = xt_table_get_private_protected(t);
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
@@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const struct xt_table_info *private = xt_table_get_private_protected(table);
	void __user *pos;
	unsigned int size;
	int ret = 0;
Loading