Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 1 | #include <linux/types.h> |
| 2 | #include <linux/spinlock.h> |
| 3 | #include <linux/sock_diag.h> |
| 4 | #include <linux/unix_diag.h> |
| 5 | #include <linux/skbuff.h> |
Cyrill Gorcunov | 2ea744a | 2011-12-20 04:33:03 +0000 | [diff] [blame] | 6 | #include <linux/module.h> |
Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 7 | #include <net/netlink.h> |
| 8 | #include <net/af_unix.h> |
| 9 | #include <net/tcp_states.h> |
| 10 | |
/*
 * Reserve a netlink attribute of @attrlen bytes with type @attrtype in
 * @skb and return a pointer to its payload.  NOTE: on insufficient tail
 * room __RTA_PUT() does "goto rtattr_failure", so every user of this
 * macro must provide a local rtattr_failure: label.
 */
#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
| 13 | |
Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 14 | static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) |
| 15 | { |
| 16 | struct unix_address *addr = unix_sk(sk)->addr; |
| 17 | char *s; |
| 18 | |
| 19 | if (addr) { |
| 20 | s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short)); |
| 21 | memcpy(s, addr->name->sun_path, addr->len - sizeof(short)); |
| 22 | } |
| 23 | |
| 24 | return 0; |
| 25 | |
| 26 | rtattr_failure: |
| 27 | return -EMSGSIZE; |
| 28 | } |
| 29 | |
Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 30 | static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) |
| 31 | { |
| 32 | struct dentry *dentry = unix_sk(sk)->dentry; |
| 33 | struct unix_diag_vfs *uv; |
| 34 | |
| 35 | if (dentry) { |
| 36 | uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv)); |
| 37 | uv->udiag_vfs_ino = dentry->d_inode->i_ino; |
| 38 | uv->udiag_vfs_dev = dentry->d_sb->s_dev; |
| 39 | } |
| 40 | |
| 41 | return 0; |
| 42 | |
| 43 | rtattr_failure: |
| 44 | return -EMSGSIZE; |
| 45 | } |
| 46 | |
Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 47 | static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) |
| 48 | { |
| 49 | struct sock *peer; |
| 50 | int ino; |
| 51 | |
| 52 | peer = unix_peer_get(sk); |
| 53 | if (peer) { |
| 54 | unix_state_lock(peer); |
| 55 | ino = sock_i_ino(peer); |
| 56 | unix_state_unlock(peer); |
| 57 | sock_put(peer); |
| 58 | |
| 59 | RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino); |
| 60 | } |
| 61 | |
| 62 | return 0; |
| 63 | rtattr_failure: |
| 64 | return -EMSGSIZE; |
| 65 | } |
| 66 | |
/*
 * For a listening socket, emit a UNIX_DIAG_ICONS attribute: an array of
 * u32 inode numbers, one per pending (not yet accepted) connection in
 * the receive queue.  The queue lock is held across both the attribute
 * reservation and the walk so qlen cannot change in between.
 *
 * NOTE: UNIX_DIAG_PUT() may "goto rtattr_failure"; that path is only
 * reachable while sk_receive_queue.lock is held, which is why the
 * failure label drops the lock before returning -EMSGSIZE.
 */
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);
		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
				sk->sk_receive_queue.qlen * sizeof(u32));
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			/* each queued skb carries the embryonic socket */
			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

rtattr_failure:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
| 101 | |
/*
 * Emit the current receive-queue length as a UNIX_DIAG_RQLEN attribute.
 * Read without the queue lock — a snapshot is good enough for diag.
 * Returns -EMSGSIZE when the reply skb has no room left.
 */
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	RTA_PUT_U32(nlskb, UNIX_DIAG_RQLEN, sk->sk_receive_queue.qlen);
	return 0;

rtattr_failure:	/* reached via goto hidden inside RTA_PUT_U32() */
	return -EMSGSIZE;
}
| 110 | |
/*
 * Compose one complete unix_diag_msg netlink message for @sk into @skb:
 * the fixed header plus every optional attribute requested via
 * req->udiag_show.  @sk_ino is passed in (rather than read here) so the
 * caller can sample it under the socket's state lock.
 *
 * Returns the skb length on success, or -EMSGSIZE when the message does
 * not fit; in that case everything written so far is trimmed off again
 * so the skb is left as it was on entry.
 *
 * NOTE: NLMSG_PUT() does "goto nlmsg_failure" on overflow, which is why
 * that label exists even though no explicit goto targets it early on.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags, int sk_ino)
{
	/* remember the tail so a failed fill can be rolled back */
	unsigned char *b = skb_tail_pointer(skb);
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
	nlh->nlmsg_flags = flags;

	rep = NLMSG_DATA(nlh);

	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	/* each helper returns non-zero (-EMSGSIZE) when the skb is full */
	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto nlmsg_failure;

	/* patch the final message length now that all attributes are in */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);	/* undo the partial message */
	return -EMSGSIZE;
}
| 160 | |
| 161 | static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, |
| 162 | u32 pid, u32 seq, u32 flags) |
| 163 | { |
| 164 | int sk_ino; |
| 165 | |
| 166 | unix_state_lock(sk); |
| 167 | sk_ino = sock_i_ino(sk); |
| 168 | unix_state_unlock(sk); |
| 169 | |
| 170 | if (!sk_ino) |
| 171 | return 0; |
| 172 | |
| 173 | return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino); |
| 174 | } |
| 175 | |
/*
 * Netlink dump callback: walk every hash slot of unix_socket_table and
 * emit a message for each socket matching req->udiag_states.
 *
 * Resume state lives in cb->args: args[0] is the slot to restart from,
 * args[1] the number of sockets already reported within that slot.
 * When the reply skb fills up (sk_diag_fill < 0) we stop and save the
 * position so the next invocation continues where this one left off.
 *
 * The whole walk runs under unix_table_lock (a spinlock), so the dump
 * helpers called from here must not sleep.
 */
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;

	req = NLMSG_DATA(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	/* <= : the table has one extra slot beyond UNIX_HASH_SIZE */
	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;
		struct hlist_node *node;

		num = 0;
		sk_for_each(sk, node, &unix_socket_table[slot]) {
			/* skip entries already sent in a previous pass */
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	/* record resume position for the next dump invocation */
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
| 213 | |
Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 214 | static struct sock *unix_lookup_by_ino(int ino) |
| 215 | { |
| 216 | int i; |
| 217 | struct sock *sk; |
| 218 | |
| 219 | spin_lock(&unix_table_lock); |
| 220 | for (i = 0; i <= UNIX_HASH_SIZE; i++) { |
| 221 | struct hlist_node *node; |
| 222 | |
| 223 | sk_for_each(sk, node, &unix_socket_table[i]) |
| 224 | if (ino == sock_i_ino(sk)) { |
| 225 | sock_hold(sk); |
| 226 | spin_unlock(&unix_table_lock); |
| 227 | |
| 228 | return sk; |
| 229 | } |
| 230 | } |
| 231 | |
| 232 | spin_unlock(&unix_table_lock); |
| 233 | return NULL; |
| 234 | } |
| 235 | |
/*
 * Handle a non-dump (exact) request: look the socket up by inode,
 * verify the caller-supplied cookie, build a single reply message and
 * unicast it back.
 *
 * Because the reply size depends on which attributes are requested and
 * how large the socket's name is, the reply skb is allocated with a
 * guessed amount of extra room and grown in 256-byte steps (up to one
 * page) whenever sk_diag_fill() reports -EMSGSIZE.
 *
 * Returns 0 on success or a negative errno (-EINVAL bad ino, -ENOENT
 * no such socket, -ENOMEM allocation failure, or the cookie/unicast
 * error).
 */
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;

	if (req->udiag_ino == 0)
		goto out_nosk;

	/* takes a reference on success; dropped at "out" */
	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		/* message did not fit — retry with a bigger skb */
		kfree_skb(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	/* netlink_unicast consumes rep regardless of outcome */
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
| 285 | |
| 286 | static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) |
| 287 | { |
| 288 | int hdrlen = sizeof(struct unix_diag_req); |
| 289 | |
| 290 | if (nlmsg_len(h) < hdrlen) |
| 291 | return -EINVAL; |
| 292 | |
| 293 | if (h->nlmsg_flags & NLM_F_DUMP) |
| 294 | return netlink_dump_start(sock_diag_nlsk, skb, h, |
| 295 | unix_diag_dump, NULL, 0); |
| 296 | else |
| 297 | return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); |
| 298 | } |
| 299 | |
/* Registration record hooking this module into the sock_diag core. */
static struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};
| 304 | |
/* Module init: register the AF_UNIX handler with sock_diag. */
static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}
| 309 | |
/* Module exit: unhook the AF_UNIX handler from sock_diag. */
static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}
| 314 | |
module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module when a NETLINK_SOCK_DIAG request arrives for
 * protocol family 1 (AF_LOCAL / AF_UNIX). */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);