/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* Forward declarations - these statics are used before they are defined */
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = RW_LOCK_UNLOCKED,
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

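/*
 * Together these helpers build the 16-byte IUCV user data: high_nmcpy()
 * fills bytes 0-7 and low_nmcpy() fills bytes 8-15.  Callers convert the
 * result from ASCII to EBCDIC (ASCEBC) before handing it to the IUCV
 * base layer.
 */
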
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static void iucv_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = iucv_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);
		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_init_timer(sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	sock_hold(sk);
	list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	list_del_init(&iucv_sk(sk)->accept_q);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			release_sock(sk);
			iucv_accept_unlink(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

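/*
 * Wait until the socket reaches one of the two requested states.  The
 * socket lock is dropped while sleeping and re-taken afterwards; the
 * wait is cut short by a zero timeout, a pending signal or a socket
 * error.
 */
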
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);

	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

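/*
 * The autobound name above is an 8-character hex rendering of the global
 * autobind counter; the counter is bumped until the resulting name is not
 * already in use in iucv_sk_list.  The user id comes from the "QUERY
 * USERID" CP command, just as during module initialisation.
 */
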
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}
done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock(sk);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

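/*
 * Every outgoing message gets a fresh tag from iucv->send_tag.  The tag
 * is also stored in skb->cb, and the skb stays on send_skb_q until
 * iucv_callback_txdone() reports the matching message as complete and
 * frees it.
 */
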
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		skb = sock_alloc_send_skb(sk, len,
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (!skb)
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}

	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

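/*
 * Receive pulls one queued skb off the receive queue.  Data the caller
 * did not consume is pushed back onto the head of the queue (and the skb
 * is always requeued for MSG_PEEK), so a short read does not lose the
 * rest of the message.
 */
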
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	int target, copied = 0;
	struct sk_buff *skb;
	int err = 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
		lock_sock(sk);
		err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
					   sk->sk_lingertime);
		release_sock(sk);
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);

	return err;
}

/* Callback wrappers - called from iucv base support */

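/*
 * path_pending: a peer asks to connect.  Look up a listening socket whose
 * src_name matches the first half of the incoming user data; if none is
 * found the path is not ours.  Sever the path when the parent is not
 * listening or its backlog is full, otherwise allocate a child socket,
 * accept the path on its behalf and queue it on the parent's accept queue.
 */
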
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);

	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *skb;
	int rc;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb) {
		iucv_message_reject(path, msg);
		return;
	}

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}

		skb->h.raw = skb->data;
		skb->nh.raw = skb->data;
		skb->len = msg->length;
	}

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);

	do {
		this = list_skb;
		list_skb = list_skb->next;
	} while (memcmp(&msg->tag, this->cb, 4));

	spin_unlock_irqrestore(&list->lock, flags);

	skb_unlink(this, &iucv_sk(sk)->send_skb_q);
	kfree_skb(this);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
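
/*
 * Illustrative userspace usage (a sketch, not part of this module): it
 * assumes AF_IUCV is visible to userspace and that struct sockaddr_iucv
 * is laid out as declared in <net/iucv/af_iucv.h>, with siucv_family and
 * the 8-byte siucv_user_id and siucv_name fields used by iucv_sock_bind()
 * and iucv_sock_connect() above, and the remaining fields left zero.
 * The names shown ("LNXPEER ", "APPLNAME") are hypothetical placeholders.
 *
 *	struct sockaddr_iucv addr;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "LNXPEER ", 8);	// blank-padded VM user id
 *	memcpy(addr.siucv_name, "APPLNAME", 8);		// blank-padded application name
 *
 *	// client: connect to the name the peer has bound
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 *	// server: bind a local name, then listen/accept as with any
 *	// stream socket; unbound sockets are autobound on connect
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	listen(fd, 5);
 */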