blob: 6cf02b41ef951b0e8da9fd7906aada6a487baabb [file] [log] [blame]
Jennifer Hunteac37312007-02-08 13:51:54 -08001/*
2 * linux/net/iucv/af_iucv.c
3 *
4 * IUCV protocol stack for Linux on zSeries
5 *
6 * Copyright 2006 IBM Corporation
7 *
8 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
9 */
10
Ursula Braun8f7c5022008-12-25 13:39:47 +010011#define KMSG_COMPONENT "af_iucv"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
Jennifer Hunteac37312007-02-08 13:51:54 -080014#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/list.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/skbuff.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <net/sock.h>
25#include <asm/ebcdic.h>
26#include <asm/cpcmd.h>
27#include <linux/kmod.h>
28
29#include <net/iucv/iucv.h>
30#include <net/iucv/af_iucv.h>
31
32#define CONFIG_IUCV_SOCK_DEBUG 1
33
34#define IPRMDATA 0x80
35#define VERSION "1.0"
36
37static char iucv_userid[80];
38
39static struct proto_ops iucv_sock_ops;
40
41static struct proto iucv_proto = {
42 .name = "AF_IUCV",
43 .owner = THIS_MODULE,
44 .obj_size = sizeof(struct iucv_sock),
45};
46
Heiko Carstens57f20442007-10-08 02:02:52 -070047static void iucv_sock_kill(struct sock *sk);
48static void iucv_sock_close(struct sock *sk);
49
Jennifer Hunteac37312007-02-08 13:51:54 -080050/* Call Back functions */
51static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
52static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
53static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
Heiko Carstensda99f052007-05-04 12:23:27 -070054static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
55 u8 ipuser[16]);
Jennifer Hunteac37312007-02-08 13:51:54 -080056static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
Hendrik Brueckneraf88b522009-04-21 23:26:21 +000057static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
Jennifer Hunteac37312007-02-08 13:51:54 -080058
59static struct iucv_sock_list iucv_sk_list = {
Robert P. J. Day3db8ce32008-04-10 02:11:24 -070060 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
Jennifer Hunteac37312007-02-08 13:51:54 -080061 .autobind_name = ATOMIC_INIT(0)
62};
63
64static struct iucv_handler af_iucv_handler = {
65 .path_pending = iucv_callback_connreq,
66 .path_complete = iucv_callback_connack,
67 .path_severed = iucv_callback_connrej,
68 .message_pending = iucv_callback_rx,
Hendrik Brueckneraf88b522009-04-21 23:26:21 +000069 .message_complete = iucv_callback_txdone,
70 .path_quiesced = iucv_callback_shutdown,
Jennifer Hunteac37312007-02-08 13:51:54 -080071};
72
73static inline void high_nmcpy(unsigned char *dst, char *src)
74{
75 memcpy(dst, src, 8);
76}
77
/* Copy an 8-byte IUCV name into the low (second) half of a 16-byte
 * user-data buffer. */
static inline void low_nmcpy(unsigned char *dst, char *src)
{
	unsigned char *low = dst + 8;

	memcpy(low, src, 8);
}
82
/* Timers */

/*
 * Connection-timeout handler armed via sk->sk_timer (see iucv_sock_alloc).
 * Runs in timer (softirq) context, hence bh_lock_sock rather than
 * lock_sock.  Flags the socket with ETIMEDOUT, wakes any waiters, then
 * drops the timer's reference on the sock.
 */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);	/* wake up sleepers in poll/wait */
	bh_unlock_sock(sk);

	/* Free the sock if it is already zapped and orphaned. */
	iucv_sock_kill(sk);
	sock_put(sk);			/* release the timer's reference */
}
96
/* Cancel a pending socket timer and drop its sock reference, if armed. */
static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
101
/*
 * Look up a socket whose 8-byte src_name matches @nm.
 * Caller must hold iucv_sk_list.lock (double-underscore convention);
 * returns the sock without taking a reference.
 */
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
113
/*
 * sk->sk_destruct callback: drop any skbs still queued when the last
 * reference to the sock goes away.
 */
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
119
/* Cleanup Listen */

/*
 * Tear down a listening socket: close and kill every connection still
 * sitting on the accept queue, then mark the parent closed and zapped.
 */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
134
/* Kill socket */

/*
 * Final teardown of a sock.  A no-op unless the socket is both zapped
 * and orphaned (no struct socket attached) — i.e. both the protocol
 * side and the fd side are done with it.  Unlinks it from the global
 * list and drops the list's reference.
 */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
145
/* Close an IUCV socket */

/*
 * Drive the socket through its close state machine.  For a connected
 * socket this means: enter IUCV_CLOSING, wait (bounded by SO_LINGER or
 * IUCV_DISCONN_TIMEOUT) for in-flight sends to drain, sever and free
 * the IUCV path, then purge the queues.  Finally zap the socket so
 * iucv_sock_kill() can reap it once it is also orphaned.
 */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		/* Give pending transmissions a chance to complete. */
		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		/* Sever the IUCV path, identifying it by both names. */
		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		/* IUCV_OPEN/IUCV_BOUND etc.: nothing to tear down. */
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
206
207static void iucv_sock_init(struct sock *sk, struct sock *parent)
208{
209 if (parent)
210 sk->sk_type = parent->sk_type;
211}
212
/*
 * Allocate and initialize a new IUCV sock.
 * @sock: owning struct socket, or NULL for sockets created on behalf of
 *        an incoming connection (see iucv_callback_connreq).
 * @prio: allocation flags (GFP_KERNEL or GFP_ATOMIC).
 * Returns the new sock, linked into iucv_sk_list, or NULL on OOM.
 */
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	/* IUCV buffers must be below 2GB, hence GFP_DMA on s390. */
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
244
245/* Create an IUCV socket */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -0700246static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
Jennifer Hunteac37312007-02-08 13:51:54 -0800247{
248 struct sock *sk;
249
250 if (sock->type != SOCK_STREAM)
251 return -ESOCKTNOSUPPORT;
252
253 sock->state = SS_UNCONNECTED;
254 sock->ops = &iucv_sock_ops;
255
256 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
257 if (!sk)
258 return -ENOMEM;
259
260 iucv_sock_init(sk, NULL);
261
262 return 0;
263}
264
/* Add a sock to the global socket list under the list's write lock. */
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);	/* also takes a reference on sk */
	write_unlock_bh(&l->lock);
}
271
/* Remove a sock from the global socket list under the list's write lock. */
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);		/* drops the list's reference */
	write_unlock_bh(&l->lock);
}
278
/*
 * Queue a new child sock on the parent's accept queue.
 * Takes a reference on @sk; irqsave locking because the queue is also
 * touched from IUCV callback (interrupt) context.
 */
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
291
/*
 * Remove @sk from its parent's accept queue and drop the reference
 * taken by iucv_accept_enqueue().  Counterpart of the enqueue above.
 */
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
304
/*
 * Pop the first usable child sock off @parent's accept queue.
 * Closed children are unlinked and skipped.  A connected or severed
 * child is unlinked, grafted onto @newsock (if given) and returned;
 * with @newsock == NULL every child is returned in turn, which is how
 * iucv_sock_cleanup_listen() drains the queue.
 */
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			/* Peer already severed the path: report DISCONN. */
			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
338
/*
 * Sleep until sk->sk_state becomes @state or @state2, a signal arrives,
 * the socket reports an error, or @timeo expires.
 * Called with the socket lock held; drops and re-takes it around
 * schedule_timeout().  Returns 0 on success or a negative errno
 * (-EAGAIN on timeout, sock_intr_errno() on signal).
 */
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}
371
/* Bind an unbound socket */

/*
 * Bind the socket to the 8-byte application name in @addr.
 * Fails with -EBADFD unless the socket is still IUCV_OPEN, and with
 * -EADDRINUSE if another socket already owns that name.  The local
 * z/VM user id is taken from the module-global iucv_userid.
 */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	/* Hold the list lock so the name check and the bind are atomic. */
	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
418
/* Automatically bind an unbound socket */

/*
 * Give an unbound socket a generated name before connect().
 * The local user id is obtained from CP via "QUERY USERID"; the name is
 * an 8-hex-digit counter value, bumped until it is unique in the
 * socket list.  Returns 0 or -EPROTO if the CP query fails.
 */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
448
449/* Connect an unconnected socket */
450static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
451 int alen, int flags)
452{
453 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
454 struct sock *sk = sock->sk;
455 struct iucv_sock *iucv;
456 unsigned char user_data[16];
457 int err;
458
459 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
460 return -EINVAL;
461
462 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
463 return -EBADFD;
464
465 if (sk->sk_type != SOCK_STREAM)
466 return -EINVAL;
467
468 iucv = iucv_sk(sk);
469
470 if (sk->sk_state == IUCV_OPEN) {
471 err = iucv_sock_autobind(sk);
472 if (unlikely(err))
473 return err;
474 }
475
476 lock_sock(sk);
477
478 /* Set the destination information */
479 memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
480 memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
481
482 high_nmcpy(user_data, sa->siucv_name);
483 low_nmcpy(user_data, iucv_sk(sk)->src_name);
484 ASCEBC(user_data, sizeof(user_data));
485
486 iucv = iucv_sk(sk);
487 /* Create path. */
488 iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
489 IPRMDATA, GFP_KERNEL);
Ursula Braund4444722008-02-07 18:07:19 -0800490 if (!iucv->path) {
491 err = -ENOMEM;
492 goto done;
493 }
Jennifer Hunteac37312007-02-08 13:51:54 -0800494 err = iucv_path_connect(iucv->path, &af_iucv_handler,
495 sa->siucv_user_id, NULL, user_data, sk);
496 if (err) {
497 iucv_path_free(iucv->path);
498 iucv->path = NULL;
Hendrik Brueckner55cdea92009-01-05 18:07:07 -0800499 switch (err) {
500 case 0x0b: /* Target communicator is not logged on */
501 err = -ENETUNREACH;
502 break;
503 case 0x0d: /* Max connections for this guest exceeded */
504 case 0x0e: /* Max connections for target guest exceeded */
505 err = -EAGAIN;
506 break;
507 case 0x0f: /* Missing IUCV authorization */
508 err = -EACCES;
509 break;
510 default:
511 err = -ECONNREFUSED;
512 break;
513 }
Jennifer Hunteac37312007-02-08 13:51:54 -0800514 goto done;
515 }
516
517 if (sk->sk_state != IUCV_CONNECTED) {
518 err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
519 sock_sndtimeo(sk, flags & O_NONBLOCK));
520 }
521
522 if (sk->sk_state == IUCV_DISCONN) {
523 release_sock(sk);
524 return -ECONNREFUSED;
525 }
Ursula Braun18becbc2009-01-05 18:07:46 -0800526
527 if (err) {
528 iucv_path_sever(iucv->path, NULL);
529 iucv_path_free(iucv->path);
530 iucv->path = NULL;
531 }
532
Jennifer Hunteac37312007-02-08 13:51:54 -0800533done:
534 release_sock(sk);
535 return err;
536}
537
538/* Move a socket into listening state. */
539static int iucv_sock_listen(struct socket *sock, int backlog)
540{
541 struct sock *sk = sock->sk;
542 int err;
543
544 lock_sock(sk);
545
546 err = -EINVAL;
547 if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
548 goto done;
549
550 sk->sk_max_ack_backlog = backlog;
551 sk->sk_ack_backlog = 0;
552 sk->sk_state = IUCV_LISTEN;
553 err = 0;
554
555done:
556 release_sock(sk);
557 return err;
558}
559
/* Accept a pending connection */

/*
 * Wait (bounded by the receive timeout) for a child connection on the
 * accept queue and graft it onto @newsock.  Uses lock_sock_nested
 * because the parent's lock is taken while children may also be
 * locked during dequeue.
 */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Socket may have left listening state while we slept. */
		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
614
615static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
616 int *len, int peer)
617{
618 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
619 struct sock *sk = sock->sk;
620
621 addr->sa_family = AF_IUCV;
622 *len = sizeof(struct sockaddr_iucv);
623
624 if (peer) {
625 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
626 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
627 } else {
628 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
629 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
630 }
631 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
632 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
633 memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
634
635 return 0;
636}
637
638static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
639 struct msghdr *msg, size_t len)
640{
641 struct sock *sk = sock->sk;
642 struct iucv_sock *iucv = iucv_sk(sk);
643 struct sk_buff *skb;
644 struct iucv_message txmsg;
Ursula Braun8f7c5022008-12-25 13:39:47 +0100645 char user_id[9];
646 char appl_id[9];
Jennifer Hunteac37312007-02-08 13:51:54 -0800647 int err;
648
649 err = sock_error(sk);
650 if (err)
651 return err;
652
653 if (msg->msg_flags & MSG_OOB)
654 return -EOPNOTSUPP;
655
656 lock_sock(sk);
657
658 if (sk->sk_shutdown & SEND_SHUTDOWN) {
659 err = -EPIPE;
660 goto out;
661 }
662
Heiko Carstensda99f052007-05-04 12:23:27 -0700663 if (sk->sk_state == IUCV_CONNECTED) {
664 if (!(skb = sock_alloc_send_skb(sk, len,
665 msg->msg_flags & MSG_DONTWAIT,
666 &err)))
Jennifer Hunt561e0362007-05-04 12:22:07 -0700667 goto out;
Jennifer Hunteac37312007-02-08 13:51:54 -0800668
Heiko Carstensda99f052007-05-04 12:23:27 -0700669 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
Jennifer Hunteac37312007-02-08 13:51:54 -0800670 err = -EFAULT;
671 goto fail;
672 }
673
674 txmsg.class = 0;
Ursula Braun469689a4d2008-06-09 15:51:03 -0700675 memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
Jennifer Hunteac37312007-02-08 13:51:54 -0800676 txmsg.tag = iucv->send_tag++;
677 memcpy(skb->cb, &txmsg.tag, 4);
678 skb_queue_tail(&iucv->send_skb_q, skb);
679 err = iucv_message_send(iucv->path, &txmsg, 0, 0,
680 (void *) skb->data, skb->len);
681 if (err) {
Ursula Braun8f7c5022008-12-25 13:39:47 +0100682 if (err == 3) {
683 user_id[8] = 0;
684 memcpy(user_id, iucv->dst_user_id, 8);
685 appl_id[8] = 0;
686 memcpy(appl_id, iucv->dst_name, 8);
687 pr_err("Application %s on z/VM guest %s"
688 " exceeds message limit\n",
689 user_id, appl_id);
690 }
Jennifer Hunteac37312007-02-08 13:51:54 -0800691 skb_unlink(skb, &iucv->send_skb_q);
692 err = -EPIPE;
693 goto fail;
694 }
695
696 } else {
697 err = -ENOTCONN;
698 goto out;
699 }
700
701 release_sock(sk);
702 return len;
703
704fail:
705 kfree_skb(skb);
706out:
707 release_sock(sk);
708 return err;
709}
710
/*
 * Split a large received skb into chunks of at most sk_rcvbuf/4 bytes
 * and append them to the socket's backlog queue, so oversized IUCV
 * messages do not blow past the receive-buffer accounting.
 * Returns 0 or -ENOMEM; on -ENOMEM already-queued fragments remain on
 * the backlog queue.
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
740
/*
 * Receive one IUCV message into @skb and deliver it to the socket.
 * IPRMDATA (parameter-list data) messages carry no payload to fetch.
 * Messages too large for the receive buffer are split via
 * iucv_fragment_skb(); a fragmentation failure severs the path.
 * If the socket's rcv queue rejects the skb it is parked on the
 * backlog queue for iucv_sock_recvmsg() to retry.
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			/* deliver the first fragment below */
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
776
/*
 * Drain messages that were parked on message_q because the receive
 * buffer was full when they arrived (see iucv_callback_rx).  Stops as
 * soon as an skb allocation fails or delivery starts backing up into
 * the backlog queue again.  Caller holds message_q.lock.
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
794
/*
 * Receive data from the socket.  Returns 0 (EOF) on a disconnected
 * socket once all queues are drained.  After consuming an skb (unless
 * MSG_PEEK) it refills the receive queue from the backlog queue and,
 * when that is empty, from the deferred message_q.
 */
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	/* Peer gone and nothing left queued anywhere: report EOF. */
	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		/* Copy to userspace failed: put the skb back. */
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		/* Partially read skb goes back to the head of the queue. */
		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		/* Backlog drained: pull deferred messages forward. */
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}

	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}
870
/*
 * poll() helper for listening sockets: report readable as soon as any
 * child on the accept queue has completed its connection.
 */
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
885
/*
 * poll()/select() support: translate socket state, shutdown flags and
 * queue occupancy into a poll event mask.
 */
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* Readable: data queued, or reads will return 0 after shutdown. */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	/* Disconnected peer: wake readers so they can observe EOF. */
	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
923
924static int iucv_sock_shutdown(struct socket *sock, int how)
925{
926 struct sock *sk = sock->sk;
927 struct iucv_sock *iucv = iucv_sk(sk);
928 struct iucv_message txmsg;
929 int err = 0;
930 u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
931
932 how++;
933
934 if ((how & ~SHUTDOWN_MASK) || !how)
935 return -EINVAL;
936
937 lock_sock(sk);
Heiko Carstensda99f052007-05-04 12:23:27 -0700938 switch (sk->sk_state) {
Jennifer Hunteac37312007-02-08 13:51:54 -0800939 case IUCV_CLOSED:
940 err = -ENOTCONN;
941 goto fail;
942
943 default:
944 sk->sk_shutdown |= how;
945 break;
946 }
947
948 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
949 txmsg.class = 0;
950 txmsg.tag = 0;
951 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
952 (void *) prmmsg, 8);
953 if (err) {
Heiko Carstensda99f052007-05-04 12:23:27 -0700954 switch (err) {
Jennifer Hunteac37312007-02-08 13:51:54 -0800955 case 1:
956 err = -ENOTCONN;
957 break;
958 case 2:
959 err = -ECONNRESET;
960 break;
961 default:
962 err = -ENOTCONN;
963 break;
964 }
965 }
966 }
967
968 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
969 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
970 if (err)
971 err = -ENOTCONN;
972
973 skb_queue_purge(&sk->sk_receive_queue);
974 }
975
976 /* Wake up anyone sleeping in poll */
977 sk->sk_state_change(sk);
978
979fail:
980 release_sock(sk);
981 return err;
982}
983
/*
 * release() for the socket layer: close the socket, sever and free any
 * remaining IUCV path, detach from the fd (sock_orphan) and reap the
 * sock once it is both zapped and orphaned.
 */
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
1005
/* Callback wrappers - called from iucv base support */

/*
 * path_pending callback: a peer is connecting.  Match the target name
 * against our listening sockets; on a match, allocate a child sock,
 * accept the path on its behalf and queue it for accept(2).  The path
 * is severed (and freed) if no listener matches, the backlog is full,
 * allocation fails, or iucv_path_accept() fails.
 * Runs in IUCV interrupt context (bh_lock_sock, GFP_ATOMIC).
 * Note: always returns 0 once a matching listener was found, even on
 * the failure paths — the path has been dealt with either way.
 */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
1101
1102static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1103{
1104 struct sock *sk = path->private;
1105
1106 sk->sk_state = IUCV_CONNECTED;
1107 sk->sk_state_change(sk);
1108}
1109
1110static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1111{
1112 struct sock *sk = path->private;
Jennifer Hunt561e0362007-05-04 12:22:07 -07001113 struct iucv_sock *iucv = iucv_sk(sk);
Ursula Braunf0703c82007-10-08 02:03:31 -07001114 struct sk_buff *skb;
1115 struct sock_msg_q *save_msg;
1116 int len;
Jennifer Hunt561e0362007-05-04 12:22:07 -07001117
Jennifer Hunteac37312007-02-08 13:51:54 -08001118 if (sk->sk_shutdown & RCV_SHUTDOWN)
1119 return;
1120
Ursula Braunf0703c82007-10-08 02:03:31 -07001121 if (!list_empty(&iucv->message_q.list) ||
1122 !skb_queue_empty(&iucv->backlog_skb_q))
1123 goto save_message;
1124
1125 len = atomic_read(&sk->sk_rmem_alloc);
1126 len += msg->length + sizeof(struct sk_buff);
1127 if (len > sk->sk_rcvbuf)
1128 goto save_message;
1129
Jennifer Hunteac37312007-02-08 13:51:54 -08001130 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
Ursula Braunf0703c82007-10-08 02:03:31 -07001131 if (!skb)
1132 goto save_message;
Jennifer Hunteac37312007-02-08 13:51:54 -08001133
Ursula Braunf0703c82007-10-08 02:03:31 -07001134 spin_lock(&iucv->message_q.lock);
1135 iucv_process_message(sk, skb, path, msg);
1136 spin_unlock(&iucv->message_q.lock);
Jennifer Hunteac37312007-02-08 13:51:54 -08001137
Ursula Braunf0703c82007-10-08 02:03:31 -07001138 return;
Jennifer Hunt561e0362007-05-04 12:22:07 -07001139
Ursula Braunf0703c82007-10-08 02:03:31 -07001140save_message:
1141 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
Ursula Braund4444722008-02-07 18:07:19 -08001142 if (!save_msg)
1143 return;
Ursula Braunf0703c82007-10-08 02:03:31 -07001144 save_msg->path = path;
1145 save_msg->msg = *msg;
1146
1147 spin_lock(&iucv->message_q.lock);
1148 list_add_tail(&save_msg->list, &iucv->message_q.list);
1149 spin_unlock(&iucv->message_q.lock);
Jennifer Hunteac37312007-02-08 13:51:54 -08001150}
1151
1152static void iucv_callback_txdone(struct iucv_path *path,
1153 struct iucv_message *msg)
1154{
1155 struct sock *sk = path->private;
Ursula Braunf2a77992008-02-07 18:07:44 -08001156 struct sk_buff *this = NULL;
Jennifer Hunteac37312007-02-08 13:51:54 -08001157 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1158 struct sk_buff *list_skb = list->next;
1159 unsigned long flags;
1160
Ursula Braunf2a77992008-02-07 18:07:44 -08001161 if (!skb_queue_empty(list)) {
Jennifer Hunt561e0362007-05-04 12:22:07 -07001162 spin_lock_irqsave(&list->lock, flags);
Jennifer Hunteac37312007-02-08 13:51:54 -08001163
Ursula Braunf2a77992008-02-07 18:07:44 -08001164 while (list_skb != (struct sk_buff *)list) {
1165 if (!memcmp(&msg->tag, list_skb->cb, 4)) {
1166 this = list_skb;
1167 break;
1168 }
Jennifer Hunt561e0362007-05-04 12:22:07 -07001169 list_skb = list_skb->next;
Ursula Braunf2a77992008-02-07 18:07:44 -08001170 }
1171 if (this)
1172 __skb_unlink(this, list);
Jennifer Hunteac37312007-02-08 13:51:54 -08001173
Jennifer Hunt561e0362007-05-04 12:22:07 -07001174 spin_unlock_irqrestore(&list->lock, flags);
Jennifer Hunteac37312007-02-08 13:51:54 -08001175
Wei Yongjun47a30b22009-02-25 00:41:03 +00001176 kfree_skb(this);
Jennifer Hunt561e0362007-05-04 12:22:07 -07001177 }
Ursula Braunc2b4afd2008-07-14 09:59:29 +02001178 BUG_ON(!this);
Jennifer Hunt561e0362007-05-04 12:22:07 -07001179
Heiko Carstensda99f052007-05-04 12:23:27 -07001180 if (sk->sk_state == IUCV_CLOSING) {
Jennifer Hunt561e0362007-05-04 12:22:07 -07001181 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1182 sk->sk_state = IUCV_CLOSED;
1183 sk->sk_state_change(sk);
1184 }
1185 }
1186
Jennifer Hunteac37312007-02-08 13:51:54 -08001187}
1188
1189static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1190{
1191 struct sock *sk = path->private;
1192
1193 if (!list_empty(&iucv_sk(sk)->accept_q))
1194 sk->sk_state = IUCV_SEVERED;
1195 else
1196 sk->sk_state = IUCV_DISCONN;
1197
1198 sk->sk_state_change(sk);
1199}
1200
Hendrik Brueckneraf88b522009-04-21 23:26:21 +00001201/* called if the other communication side shuts down its RECV direction;
1202 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1203 */
1204static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1205{
1206 struct sock *sk = path->private;
1207
1208 bh_lock_sock(sk);
1209 if (sk->sk_state != IUCV_CLOSED) {
1210 sk->sk_shutdown |= SEND_SHUTDOWN;
1211 sk->sk_state_change(sk);
1212 }
1213 bh_unlock_sock(sk);
1214}
1215
Jennifer Hunteac37312007-02-08 13:51:54 -08001216static struct proto_ops iucv_sock_ops = {
1217 .family = PF_IUCV,
1218 .owner = THIS_MODULE,
1219 .release = iucv_sock_release,
1220 .bind = iucv_sock_bind,
1221 .connect = iucv_sock_connect,
1222 .listen = iucv_sock_listen,
1223 .accept = iucv_sock_accept,
1224 .getname = iucv_sock_getname,
1225 .sendmsg = iucv_sock_sendmsg,
1226 .recvmsg = iucv_sock_recvmsg,
1227 .poll = iucv_sock_poll,
1228 .ioctl = sock_no_ioctl,
1229 .mmap = sock_no_mmap,
1230 .socketpair = sock_no_socketpair,
1231 .shutdown = iucv_sock_shutdown,
1232 .setsockopt = sock_no_setsockopt,
1233 .getsockopt = sock_no_getsockopt
1234};
1235
1236static struct net_proto_family iucv_sock_family_ops = {
1237 .family = AF_IUCV,
1238 .owner = THIS_MODULE,
1239 .create = iucv_sock_create,
1240};
1241
Heiko Carstensda99f052007-05-04 12:23:27 -07001242static int __init afiucv_init(void)
Jennifer Hunteac37312007-02-08 13:51:54 -08001243{
1244 int err;
1245
1246 if (!MACHINE_IS_VM) {
Ursula Braun8f7c5022008-12-25 13:39:47 +01001247 pr_err("The af_iucv module cannot be loaded"
1248 " without z/VM\n");
Jennifer Hunteac37312007-02-08 13:51:54 -08001249 err = -EPROTONOSUPPORT;
1250 goto out;
1251 }
1252 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1253 if (unlikely(err)) {
Ursula Braunc2b4afd2008-07-14 09:59:29 +02001254 WARN_ON(err);
Jennifer Hunteac37312007-02-08 13:51:54 -08001255 err = -EPROTONOSUPPORT;
1256 goto out;
1257 }
1258
1259 err = iucv_register(&af_iucv_handler, 0);
1260 if (err)
1261 goto out;
1262 err = proto_register(&iucv_proto, 0);
1263 if (err)
1264 goto out_iucv;
1265 err = sock_register(&iucv_sock_family_ops);
1266 if (err)
1267 goto out_proto;
Jennifer Hunteac37312007-02-08 13:51:54 -08001268 return 0;
1269
1270out_proto:
1271 proto_unregister(&iucv_proto);
1272out_iucv:
1273 iucv_unregister(&af_iucv_handler, 0);
1274out:
1275 return err;
1276}
1277
/* Module teardown: undo the afiucv_init() registrations in reverse
 * order (socket family first, so no new sockets can appear while the
 * protocol and IUCV handler are being unregistered).
 */
static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}
1284
module_init(afiucv_init);
module_exit(afiucv_exit);

/* Module metadata */
MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);