Commit 2ed08b5e authored by David S. Miller
Browse files

Merge branch 'Managed-Neighbor-Entries'



Daniel Borkmann says:

====================
Managed Neighbor Entries

This series adds a couple of fixes related to NTF_EXT_LEARNED and NTF_USE
neighbor flags, extends the UAPI with a new NDA_FLAGS_EXT netlink attribute
in order to be able to add new neighbor flags from user space given all
current struct ndmsg / ndm_flags bits are used up. Finally, the core of this
series adds a new NTF_EXT_MANAGED flag to neighbors, which allows user space
control planes to add 'managed' neighbor entries. Meaning, user space may
either transition existing entries or can push down new L3 entries without
lladdr into the kernel where the latter will periodically try to keep such
NTF_EXT_MANAGED managed entries in reachable state. Main use case for this
series are XDP / tc BPF load-balancers which make use of the bpf_fib_lookup()
helper for backends. For more details, please see individual patches. Thanks!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7bb39a39 7482e384
Loading
Loading
Loading
Loading
+23 −11
Original line number Diff line number Diff line
@@ -144,17 +144,18 @@ struct neighbour {
	struct timer_list	timer;
	unsigned long		used;
	atomic_t		probes;
	__u8			flags;
	__u8			nud_state;
	__u8			type;
	__u8			dead;
	u8			nud_state;
	u8			type;
	u8			dead;
	u8			protocol;
	u32			flags;
	seqlock_t		ha_lock;
	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
	struct hh_cache		hh;
	int			(*output)(struct neighbour *, struct sk_buff *);
	const struct neigh_ops	*ops;
	struct list_head	gc_list;
	struct list_head	managed_list;
	struct rcu_head		rcu;
	struct net_device	*dev;
	u8			primary_key[0];
@@ -172,7 +173,7 @@ struct pneigh_entry {
	struct pneigh_entry	*next;
	possible_net_t		net;
	struct net_device	*dev;
	u8			flags;
	u32			flags;
	u8			protocol;
	u8			key[];
};
@@ -216,11 +217,13 @@ struct neigh_table {
	int			gc_thresh3;
	unsigned long		last_flush;
	struct delayed_work	gc_work;
	struct delayed_work	managed_work;
	struct timer_list 	proxy_timer;
	struct sk_buff_head	proxy_queue;
	atomic_t		entries;
	atomic_t		gc_entries;
	struct list_head	gc_list;
	struct list_head	managed_list;
	rwlock_t		lock;
	unsigned long		last_rand;
	struct neigh_statistics	__percpu *stats;
@@ -250,12 +253,21 @@ static inline void *neighbour_priv(const struct neighbour *n)
}

/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
#define NEIGH_UPDATE_F_EXT_LEARNED		0x20000000
#define NEIGH_UPDATE_F_ISROUTER			0x40000000
#define NEIGH_UPDATE_F_ADMIN			0x80000000
#define NEIGH_UPDATE_F_OVERRIDE			BIT(0)
#define NEIGH_UPDATE_F_WEAK_OVERRIDE		BIT(1)
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	BIT(2)
#define NEIGH_UPDATE_F_USE			BIT(3)
#define NEIGH_UPDATE_F_MANAGED			BIT(4)
#define NEIGH_UPDATE_F_EXT_LEARNED		BIT(5)
#define NEIGH_UPDATE_F_ISROUTER			BIT(6)
#define NEIGH_UPDATE_F_ADMIN			BIT(7)

/* In-kernel representation for NDA_FLAGS_EXT flags: */
#define NTF_OLD_MASK		0xff
#define NTF_EXT_SHIFT		8
#define NTF_EXT_MASK		(NTF_EXT_MANAGED)

#define NTF_MANAGED		(NTF_EXT_MANAGED << NTF_EXT_SHIFT)

extern const struct nla_policy nda_policy[];

+24 −11
Original line number Diff line number Diff line
@@ -31,6 +31,7 @@ enum {
	NDA_PROTOCOL,  /* Originator of entry */
	NDA_NH_ID,
	NDA_FDB_EXT_ATTRS,
	NDA_FLAGS_EXT,
	__NDA_MAX
};

@@ -40,14 +41,16 @@ enum {
 *	Neighbor Cache Entry Flags
 */

#define NTF_USE		0x01
#define NTF_SELF	0x02
#define NTF_MASTER	0x04
#define NTF_PROXY	0x08	/* == ATF_PUBL */
#define NTF_EXT_LEARNED	0x10
#define NTF_OFFLOADED   0x20
#define NTF_STICKY	0x40
#define NTF_ROUTER	0x80
#define NTF_USE		(1 << 0)
#define NTF_SELF	(1 << 1)
#define NTF_MASTER	(1 << 2)
#define NTF_PROXY	(1 << 3)	/* == ATF_PUBL */
#define NTF_EXT_LEARNED	(1 << 4)
#define NTF_OFFLOADED   (1 << 5)
#define NTF_STICKY	(1 << 6)
#define NTF_ROUTER	(1 << 7)
/* Extended flags under NDA_FLAGS_EXT: */
#define NTF_EXT_MANAGED	(1 << 0)

/*
 *	Neighbor Cache Entry States.
@@ -65,12 +68,22 @@ enum {
#define NUD_PERMANENT	0x80
#define NUD_NONE	0x00

/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
 * and make no address resolution or NUD.
 * NUD_PERMANENT also cannot be deleted by garbage collectors.
/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change and make no
 * address resolution or NUD.
 *
 * NUD_PERMANENT also cannot be deleted by garbage collectors. This holds true
 * for dynamic entries with NTF_EXT_LEARNED flag as well. However, upon carrier
 * down event, NUD_PERMANENT entries are not flushed whereas NTF_EXT_LEARNED
 * flagged entries explicitly are (which is also consistent with the routing
 * subsystem).
 *
 * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
 * states don't make sense and thus are ignored. Such entries don't age and
 * can roam.
 *
 * NTF_EXT_MANAGED flagged neighbor entries are managed by the kernel on behalf
 * of a user space control plane, and automatically refreshed so that (if
 * possible) they remain in NUD_REACHABLE state.
 */

struct nda_cacheinfo {
+140 −56
Original line number Diff line number Diff line
@@ -122,6 +122,8 @@ static void neigh_mark_dead(struct neighbour *n)
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}

static void neigh_update_gc_list(struct neighbour *n)
@@ -130,7 +132,6 @@ static void neigh_update_gc_list(struct neighbour *n)

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	if (n->dead)
		goto out;

@@ -149,32 +150,59 @@ static void neigh_update_gc_list(struct neighbour *n)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

/* Keep @n's membership on its table's managed_list in sync with the
 * NTF_MANAGED flag: link it when the flag is set, unlink it when the
 * flag is cleared.  Takes the table lock and the neighbour lock.
 */
static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	/* Dead entries have already been unlinked in neigh_mark_dead(). */
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	/* Only touch the list when flag state and list state disagree. */
	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
			       bool *gc_update, bool *managed_update)
{
	bool rc = false;
	u8 ndm_flags;
	u32 ndm_flags, old_flags = neigh->flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;
		return;

	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
		*gc_update = true;
	}
	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
		if (ndm_flags & NTF_MANAGED)
			neigh->flags |= NTF_MANAGED;
		else
			neigh->flags &= ~NTF_MANAGED;
		*notify = 1;
		*managed_update = true;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
@@ -379,7 +407,7 @@ EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
@@ -412,6 +440,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);
@@ -421,6 +450,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
@@ -575,19 +605,18 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
@@ -650,7 +679,8 @@ static struct neighbour *___neigh_create(struct neigh_table *tbl,
	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
@@ -674,7 +704,7 @@ static struct neighbour *___neigh_create(struct neigh_table *tbl,
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

@@ -1205,8 +1235,6 @@ static void neigh_update_hhs(struct neighbour *neigh)
	}
}



/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new    is new state.
@@ -1217,7 +1245,8 @@ static void neigh_update_hhs(struct neighbour *neigh)
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
@@ -1225,17 +1254,15 @@ static void neigh_update_hhs(struct neighbour *neigh)

   Caller MUST hold reference count on the entry.
 */

static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	bool gc_update = false, managed_update = false;
	int update_isrouter = 0;
	struct net_device *dev;
	int err, notify = 0;
	u8 old;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

@@ -1254,7 +1281,13 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
@@ -1399,15 +1432,13 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
	if (((new ^ old) & NUD_PERMANENT) || gc_update)
		neigh_update_gc_list(neigh);

	if (managed_update)
		neigh_update_managed_list(neigh);
	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

@@ -1533,6 +1564,20 @@ int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
}
EXPORT_SYMBOL(neigh_direct_output);

/* Periodic worker for NTF_EXT_MANAGED entries: walk the table's
 * managed_list and trigger resolution for each entry via
 * neigh_event_send() so that (if possible) they remain reachable,
 * then re-arm itself with the table's DELAY_PROBE_TIME interval.
 * First queued from neigh_table_init().
 */
static void neigh_managed_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table,
					       managed_work.work);
	struct neighbour *neigh;

	write_lock_bh(&tbl->lock);
	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
		neigh_event_send(neigh, NULL);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
			   NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME));
	write_unlock_bh(&tbl->lock);
}

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
@@ -1679,6 +1724,8 @@ void neigh_table_init(int index, struct neigh_table *tbl)

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
@@ -1710,9 +1757,13 @@ void neigh_table_init(int index, struct neigh_table *tbl)
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);
@@ -1783,6 +1834,7 @@ const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FLAGS_EXT]		= { .type = NLA_U32 },
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};

@@ -1864,6 +1916,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	u32 ndm_flags;
	int err;

	ASSERT_RTNL();
@@ -1879,6 +1932,16 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
	}

	ndm = nlmsg_data(nlh);
	ndm_flags = ndm->ndm_flags;
	if (tb[NDA_FLAGS_EXT]) {
		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);

		if (ext & ~NTF_EXT_MASK) {
			NL_SET_ERR_MSG(extack, "Invalid extended flags");
			goto out;
		}
		ndm_flags |= (ext << NTF_EXT_SHIFT);
	}
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
@@ -1906,14 +1969,18 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	if (ndm->ndm_flags & NTF_PROXY) {
	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		if (ndm_flags & NTF_MANAGED) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
			goto out;
		}

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			pn->flags = ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
@@ -1941,8 +2008,11 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		}

		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
				 ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev,
					ndm_flags &
					(NTF_EXT_LEARNED | NTF_MANAGED),
					exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
@@ -1961,22 +2031,22 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,

	if (protocol)
		neigh->protocol = protocol;

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
	if (ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_ROUTER)
	if (ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;
	if (ndm_flags & NTF_MANAGED)
		flags |= NEIGH_UPDATE_F_MANAGED;
	if (ndm_flags & NTF_USE)
		flags |= NEIGH_UPDATE_F_USE;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
			     NETLINK_CB(skb).portid, extack);

	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
		neigh_event_send(neigh, NULL);
		err = 0;
	}
	neigh_release(neigh);

out:
	return err;
}
@@ -2427,6 +2497,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	u32 neigh_flags, neigh_flags_ext;
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
@@ -2436,11 +2507,14 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
	if (nlh == NULL)
		return -EMSGSIZE;

	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
	neigh_flags     = neigh->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_flags	 = neigh_flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

@@ -2471,6 +2545,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,

	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;
@@ -2484,6 +2560,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	u32 neigh_flags, neigh_flags_ext;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

@@ -2491,11 +2568,14 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
	if (nlh == NULL)
		return -EMSGSIZE;

	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
	neigh_flags     = pn->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;
@@ -2505,6 +2585,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;
@@ -2820,6 +2902,7 @@ static inline size_t neigh_nlmsg_size(void)
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}

@@ -2848,6 +2931,7 @@ static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}