#ifndef _AF_NETLINK_H
#define _AF_NETLINK_H

#include <linux/rhashtable.h>
#include <linux/atomic.h>
#include <net/sock.h>

#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))

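/*
 * One memory-mapped ring (RX or TX) of the CONFIG_NETLINK_MMAP interface,
 * set up via the NETLINK_RX_RING/NETLINK_TX_RING socket options: pg_vec
 * holds the blocks backing the frames, the remaining fields describe the
 * ring geometry and its current position.
 */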
struct netlink_ring {
	void **pg_vec;
	unsigned int head;
	unsigned int frames_per_block;
	unsigned int frame_size;
	unsigned int frame_max;

	unsigned int pg_vec_order;
	unsigned int pg_vec_pages;
	unsigned int pg_vec_len;

	atomic_t pending;
};

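/*
 * Protocol-private state of a netlink socket; embedded at the start of
 * the allocation so that nlk_sk() below can cast from struct sock.
 * portid keys the socket in the per-protocol hash, dst_portid/dst_group
 * hold the connected destination, groups is the multicast membership
 * bitmap (ngroups bits), and cb/cb_running track an in-progress dump,
 * serialized by cb_mutex (the protocol's mutex or cb_def_mutex).
 */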
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock sk;
	u32 portid;
	u32 dst_portid;
	u32 dst_group;
	u32 flags;
	u32 subscriptions;
	u32 ngroups;
	unsigned long *groups;
	unsigned long state;
	size_t max_recvmsg_len;
	wait_queue_head_t wait;
	bool cb_running;
	struct netlink_callback cb;
	struct mutex *cb_mutex;
	struct mutex cb_def_mutex;
	void (*netlink_rcv)(struct sk_buff *skb);
	int (*netlink_bind)(struct net *net, int group);
	void (*netlink_unbind)(struct net *net, int group);
	struct module *module;
#ifdef CONFIG_NETLINK_MMAP
	struct mutex pg_vec_lock;
	struct netlink_ring rx_ring;
	struct netlink_ring tx_ring;
	atomic_t mapped;
#endif /* CONFIG_NETLINK_MMAP */

	struct rhash_head node;
	struct rcu_head rcu;
};

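/* Convert a struct sock embedded in a netlink_sock back to its container. */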
static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}

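/*
 * True when the skb's data sits in a frame of a mapped ring rather than
 * in an ordinarily allocated buffer; always false if CONFIG_NETLINK_MMAP
 * is not built in.
 */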
static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
#else
	return false;
#endif /* CONFIG_NETLINK_MMAP */
}

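/*
 * Per-protocol table entry, one for each netlink family (NETLINK_ROUTE,
 * NETLINK_GENERIC, ...): hash maps portid to socket, mc_list and
 * listeners track multicast subscribers, and bind/unbind/compare are
 * supplied by the kernel side that registered the protocol (registered
 * counts those registrations).
 */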
struct netlink_table {
	struct rhashtable hash;
	struct hlist_head mc_list;
	struct listeners __rcu *listeners;
	unsigned int flags;
	unsigned int groups;
	struct mutex *cb_mutex;
	struct module *module;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	bool (*compare)(struct net *net, struct sock *sock);
	int registered;
};

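/* The table itself (indexed by protocol number) and the lock protecting it. */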
extern struct netlink_table *nl_table;
extern rwlock_t nl_table_lock;

#endif