/* transport.c: Rx Transport routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/krxsecd.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */
#endif
#include <linux/errqueue.h>
#include <asm/uaccess.h>
#include "internal.h"

struct errormsg {
        struct cmsghdr                  cmsg;           /* control message header */
        struct sock_extended_err        ee;             /* extended error information */
        struct sockaddr_in              icmp_src;       /* ICMP packet source address */
};

static DEFINE_SPINLOCK(rxrpc_transports_lock);
static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);

__RXACCT_DECL(atomic_t rxrpc_transport_count);
LIST_HEAD(rxrpc_proc_transports);
DECLARE_RWSEM(rxrpc_proc_transports_sem);

static void rxrpc_data_ready(struct sock *sk, int count);
static void rxrpc_error_report(struct sock *sk);
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
                                        struct list_head *msgq);
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);

/*****************************************************************************/
/*
 * create a new transport endpoint using the specified UDP port
 */
int rxrpc_create_transport(unsigned short port,
                           struct rxrpc_transport **_trans)
{
        struct rxrpc_transport *trans;
        struct sockaddr_in sin;
        mm_segment_t oldfs;
        struct sock *sock;
        int ret, opt;

        _enter("%hu", port);

        trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
        if (!trans)
                return -ENOMEM;

        atomic_set(&trans->usage, 1);
        INIT_LIST_HEAD(&trans->services);
        INIT_LIST_HEAD(&trans->link);
        INIT_LIST_HEAD(&trans->krxiodq_link);
        spin_lock_init(&trans->lock);
        INIT_LIST_HEAD(&trans->peer_active);
        INIT_LIST_HEAD(&trans->peer_graveyard);
        spin_lock_init(&trans->peer_gylock);
        init_waitqueue_head(&trans->peer_gy_waitq);
        rwlock_init(&trans->peer_lock);
        atomic_set(&trans->peer_count, 0);
        trans->port = port;

        /* create a UDP socket to be my actual transport endpoint */
        ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
        if (ret < 0)
                goto error;

        /* use the specified port */
        if (port) {
                memset(&sin, 0, sizeof(sin));
                sin.sin_family = AF_INET;
                sin.sin_port = htons(port);
                ret = trans->socket->ops->bind(trans->socket,
                                               (struct sockaddr *) &sin,
                                               sizeof(sin));
                if (ret < 0)
                        goto error;
        }

        opt = 1;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
                                             (char *) &opt, sizeof(opt));
        set_fs(oldfs);

        spin_lock(&rxrpc_transports_lock);
        list_add(&trans->link, &rxrpc_transports);
        spin_unlock(&rxrpc_transports_lock);

        /* set the socket up */
        sock = trans->socket->sk;
        sock->sk_user_data = trans;
        sock->sk_data_ready = rxrpc_data_ready;
        sock->sk_error_report = rxrpc_error_report;

        down_write(&rxrpc_proc_transports_sem);
        list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
        up_write(&rxrpc_proc_transports_sem);

        __RXACCT(atomic_inc(&rxrpc_transport_count));

        *_trans = trans;
        _leave(" = 0 (%p)", trans);
        return 0;

 error:
        /* finish cleaning up the transport (not really needed here, but...) */
        if (trans->socket)
                trans->socket->ops->shutdown(trans->socket, 2);

        /* close the socket */
        if (trans->socket) {
                trans->socket->sk->sk_user_data = NULL;
                sock_release(trans->socket);
                trans->socket = NULL;
        }

        kfree(trans);

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_create_transport() */
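
/*
 * Usage sketch (not part of this file): a caller is expected to pair
 * rxrpc_create_transport() with rxrpc_put_transport().  The port number
 * and the error handling shown here are illustrative assumptions only.
 *
 *      struct rxrpc_transport *trans;
 *      int ret;
 *
 *      ret = rxrpc_create_transport(7001, &trans);
 *      if (ret < 0)
 *              return ret;
 *      ...                             (add services, exchange calls)
 *      rxrpc_put_transport(trans);
 */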

/*****************************************************************************/
/*
 * destroy a transport endpoint
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
        _enter("%p{u=%d p=%hu}",
               trans, atomic_read(&trans->usage), trans->port);

        BUG_ON(atomic_read(&trans->usage) <= 0);

        /* to prevent a race, the decrement and the dequeue must be
         * effectively atomic */
        spin_lock(&rxrpc_transports_lock);
        if (likely(!atomic_dec_and_test(&trans->usage))) {
                spin_unlock(&rxrpc_transports_lock);
                _leave("");
                return;
        }

        list_del(&trans->link);
        spin_unlock(&rxrpc_transports_lock);

        /* finish cleaning up the transport */
        if (trans->socket)
                trans->socket->ops->shutdown(trans->socket, 2);

        rxrpc_krxsecd_clear_transport(trans);
        rxrpc_krxiod_dequeue_transport(trans);

        /* discard all peer information */
        rxrpc_peer_clearall(trans);

        down_write(&rxrpc_proc_transports_sem);
        list_del(&trans->proc_link);
        up_write(&rxrpc_proc_transports_sem);
        __RXACCT(atomic_dec(&rxrpc_transport_count));

        /* close the socket */
        if (trans->socket) {
                trans->socket->sk->sk_user_data = NULL;
                sock_release(trans->socket);
                trans->socket = NULL;
        }

        kfree(trans);

        _leave("");
} /* end rxrpc_put_transport() */

/*****************************************************************************/
/*
 * add a service to a transport to be listened upon
 */
int rxrpc_add_service(struct rxrpc_transport *trans,
                      struct rxrpc_service *newsrv)
{
        struct rxrpc_service *srv;
        struct list_head *_p;
        int ret = -EEXIST;

        _enter("%p{%hu},%p{%hu}",
               trans, trans->port, newsrv, newsrv->service_id);

        /* verify that the service ID is not already present */
        spin_lock(&trans->lock);

        list_for_each(_p, &trans->services) {
                srv = list_entry(_p, struct rxrpc_service, link);
                if (srv->service_id == newsrv->service_id)
                        goto out;
        }

        /* okay - add the service to the list */
        list_add_tail(&newsrv->link, &trans->services);
        rxrpc_get_transport(trans);
        ret = 0;

 out:
        spin_unlock(&trans->lock);

        _leave("= %d", ret);
        return ret;
} /* end rxrpc_add_service() */

/*****************************************************************************/
/*
 * remove a service from a transport
 */
void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
{
        _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);

        spin_lock(&trans->lock);
        list_del(&srv->link);
        spin_unlock(&trans->lock);

        rxrpc_put_transport(trans);

        _leave("");
} /* end rxrpc_del_service() */
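
/*
 * Service registration sketch (illustrative only; how the rxrpc_service
 * record and its service_id get filled in is assumed, not taken from this
 * file).  rxrpc_add_service() takes a reference on the transport, and
 * rxrpc_del_service() drops it again:
 *
 *      struct rxrpc_service *svc = ...;        (set up by the caller)
 *
 *      if (rxrpc_add_service(trans, svc) < 0)
 *              ...                             (service ID already in use)
 *      ...
 *      rxrpc_del_service(trans, svc);
 */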

/*****************************************************************************/
/*
 * INET callback when data has been received on the socket.
 */
static void rxrpc_data_ready(struct sock *sk, int count)
{
        struct rxrpc_transport *trans;

        _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);

        /* queue the transport for attention by krxiod */
        trans = (struct rxrpc_transport *) sk->sk_user_data;
        if (trans)
                rxrpc_krxiod_queue_transport(trans);

        /* wake up anyone waiting on the socket */
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);

        _leave("");
} /* end rxrpc_data_ready() */

/*****************************************************************************/
/*
 * INET callback when an ICMP error packet is received
 * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
 */
static void rxrpc_error_report(struct sock *sk)
{
        struct rxrpc_transport *trans;

        _enter("%p{t=%p}", sk, sk->sk_user_data);

        /* queue the transport for attention by krxiod */
        trans = (struct rxrpc_transport *) sk->sk_user_data;
        if (trans) {
                trans->error_rcvd = 1;
                rxrpc_krxiod_queue_transport(trans);
        }

        /* wake up anyone waiting on the socket */
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);

        _leave("");
} /* end rxrpc_error_report() */

/*****************************************************************************/
/*
 * split a message up, allocating message records and filling them in
 * from the contents of a socket buffer
 */
static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
                              struct sk_buff *pkt,
                              struct list_head *msgq)
{
        struct rxrpc_message *msg;
        int ret;

        _enter("");

        msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
        if (!msg) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        atomic_set(&msg->usage, 1);
        list_add_tail(&msg->link, msgq);

        /* dig out the Rx routing parameters */
        if (skb_copy_bits(pkt, sizeof(struct udphdr),
                          &msg->hdr, sizeof(msg->hdr)) < 0) {
                ret = -EBADMSG;
                goto error;
        }

        msg->trans = trans;
        msg->state = RXRPC_MSG_RECEIVED;
        skb_get_timestamp(pkt, &msg->stamp);
        if (msg->stamp.tv_sec == 0) {
                do_gettimeofday(&msg->stamp);
                if (pkt->sk)
                        sock_enable_timestamp(pkt->sk);
        }
        msg->seq = ntohl(msg->hdr.seq);

        /* attach the packet */
        skb_get(pkt);
        msg->pkt = pkt;

        msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
        msg->dsize = msg->pkt->len - msg->offset;

        _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
             msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
             ntohl(msg->hdr.epoch),
             (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
             ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
             ntohl(msg->hdr.callNumber),
             rxrpc_pkts[msg->hdr.type],
             msg->hdr.flags,
             ntohs(msg->hdr.serviceId),
             msg->hdr.securityIndex);

        __RXACCT(atomic_inc(&rxrpc_message_count));

        /* split off jumbo packets */
        while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
               msg->hdr.flags & RXRPC_JUMBO_PACKET
               ) {
                struct rxrpc_jumbo_header jumbo;
                struct rxrpc_message *jumbomsg = msg;

                _debug("split jumbo packet");

                /* quick sanity check */
                ret = -EBADMSG;
                if (msg->dsize <
                    RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
                        goto error;
                if (msg->hdr.flags & RXRPC_LAST_PACKET)
                        goto error;

                /* dig out the secondary header */
                if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
                                  &jumbo, sizeof(jumbo)) < 0)
                        goto error;

                /* allocate a new message record */
                ret = -ENOMEM;
                msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL);
                if (!msg)
                        goto error;

                list_add_tail(&msg->link, msgq);

                /* adjust the jumbo packet */
                jumbomsg->dsize = RXRPC_JUMBO_DATALEN;

                /* attach the packet here too */
                skb_get(pkt);

                /* adjust the parameters */
                msg->seq++;
                msg->hdr.seq = htonl(msg->seq);
                msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
                msg->offset += RXRPC_JUMBO_DATALEN +
                        sizeof(struct rxrpc_jumbo_header);
                msg->dsize -= RXRPC_JUMBO_DATALEN +
                        sizeof(struct rxrpc_jumbo_header);
                msg->hdr.flags = jumbo.flags;
                msg->hdr._rsvd = jumbo._rsvd;

                _net("Rx Split jumbo packet from %s"
                     " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
                     msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
                     ntohl(msg->hdr.epoch),
                     (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
                     ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
                     ntohl(msg->hdr.callNumber),
                     rxrpc_pkts[msg->hdr.type],
                     msg->hdr.flags,
                     ntohs(msg->hdr.serviceId),
                     msg->hdr.securityIndex);

                __RXACCT(atomic_inc(&rxrpc_message_count));
        }

        _leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
        return 0;

 error:
        while (!list_empty(msgq)) {
                msg = list_entry(msgq->next, struct rxrpc_message, link);
                list_del_init(&msg->link);

                rxrpc_put_message(msg);
        }

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_incoming_msg() */
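
/*
 * For reference, the jumbo packet layout implied by the offset arithmetic
 * above (a sketch inferred from this function, not a protocol citation):
 *
 *      struct udphdr
 *      struct rxrpc_header
 *      RXRPC_JUMBO_DATALEN bytes of data       (first segment)
 *      struct rxrpc_jumbo_header
 *      RXRPC_JUMBO_DATALEN bytes of data       (next segment)
 *      struct rxrpc_jumbo_header
 *      ...
 *      remaining data                          (final segment)
 */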

/*****************************************************************************/
/*
 * receive and dispatch packets arriving on a transport's socket
 * - called from krxiod in process context
 */
void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
{
        struct rxrpc_message *msg;
        struct rxrpc_peer *peer;
        struct sk_buff *pkt;
        int ret;
        __be32 addr;
        __be16 port;
        LIST_HEAD(msgq);

        _enter("%p{%d}", trans, trans->port);

        for (;;) {
                /* deal with outstanding errors first */
                if (trans->error_rcvd)
                        rxrpc_trans_receive_error_report(trans);

                /* attempt to receive a packet */
                pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
                if (!pkt) {
                        if (ret == -EAGAIN) {
                                _leave(" EAGAIN");
                                return;
                        }

                        /* an icmp error may have occurred */
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" error %d\n", ret);
                        return;
                }

                /* we'll probably need to checksum it (didn't call
                 * sock_recvmsg) */
                if (skb_checksum_complete(pkt)) {
                        kfree_skb(pkt);
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" CSUM failed");
                        return;
                }

                addr = pkt->nh.iph->saddr;
                port = pkt->h.uh->source;

                _net("Rx Received UDP packet from %08x:%04hu",
                     ntohl(addr), ntohs(port));

                /* unmarshall the Rx parameters and split jumbo packets */
                ret = rxrpc_incoming_msg(trans, pkt, &msgq);
                if (ret < 0) {
                        kfree_skb(pkt);
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" bad packet");
                        return;
                }

                BUG_ON(list_empty(&msgq));

                msg = list_entry(msgq.next, struct rxrpc_message, link);

                /* locate the record for the peer from which it
                 * originated */
                ret = rxrpc_peer_lookup(trans, addr, &peer);
                if (ret < 0) {
                        kdebug("Rx No connections from that peer");
                        rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
                        goto finished_msg;
                }

                /* try and find a matching connection */
                ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
                if (ret < 0) {
                        kdebug("Rx Unknown Connection");
                        rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
                        rxrpc_put_peer(peer);
                        goto finished_msg;
                }
                rxrpc_put_peer(peer);

                /* deal with the first packet of a new call */
                if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
                    msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
                    ntohl(msg->hdr.seq) == 1
                    ) {
                        _debug("Rx New server call");
                        rxrpc_trans_receive_new_call(trans, &msgq);
                        goto finished_msg;
                }

                /* deal with subsequent packet(s) of call */
                _debug("Rx Call packet");
                while (!list_empty(&msgq)) {
                        msg = list_entry(msgq.next, struct rxrpc_message, link);
                        list_del_init(&msg->link);

                        ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
                        if (ret < 0) {
                                rxrpc_trans_immediate_abort(trans, msg, ret);
                                rxrpc_put_message(msg);
                                goto finished_msg;
                        }

                        rxrpc_put_message(msg);
                }

                goto finished_msg;

                /* dispose of the packets */
        finished_msg:
                while (!list_empty(&msgq)) {
                        msg = list_entry(msgq.next, struct rxrpc_message, link);
                        list_del_init(&msg->link);

                        rxrpc_put_message(msg);
                }
                kfree_skb(pkt);
        }

        _leave("");
} /* end rxrpc_trans_receive_packet() */

/*****************************************************************************/
/*
 * accept a new call from a client trying to connect to one of my services
 * - called in process context
 */
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
                                        struct list_head *msgq)
{
        struct rxrpc_message *msg;

        _enter("");

        /* only bother with the first packet */
        msg = list_entry(msgq->next, struct rxrpc_message, link);
        list_del_init(&msg->link);
        rxrpc_krxsecd_queue_incoming_call(msg);
        rxrpc_put_message(msg);

        _leave(" = 0");
        return 0;
} /* end rxrpc_trans_receive_new_call() */

/*****************************************************************************/
/*
 * perform an immediate abort without connection or call structures
 */
int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
                                struct rxrpc_message *msg,
                                int error)
{
        struct rxrpc_header ahdr;
        struct sockaddr_in sin;
        struct msghdr msghdr;
        struct kvec iov[2];
        __be32 _error;
        int len, ret;

        _enter("%p,%p,%d", trans, msg, error);

        /* don't abort an abort packet */
        if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
                _leave(" = 0");
                return 0;
        }

        _error = htonl(-error);

        /* set up the message to be transmitted */
        memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
        ahdr.epoch = msg->hdr.epoch;
        ahdr.serial = htonl(1);
        ahdr.seq = 0;
        ahdr.type = RXRPC_PACKET_TYPE_ABORT;
        ahdr.flags = RXRPC_LAST_PACKET;
        ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;

        iov[0].iov_len = sizeof(ahdr);
        iov[0].iov_base = &ahdr;
        iov[1].iov_len = sizeof(_error);
        iov[1].iov_base = &_error;

        len = sizeof(ahdr) + sizeof(_error);

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = msg->pkt->h.uh->source;
        sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;

        msghdr.msg_name = &sin;
        msghdr.msg_namelen = sizeof(sin);
        msghdr.msg_control = NULL;
        msghdr.msg_controllen = 0;
        msghdr.msg_flags = MSG_DONTWAIT;

        _net("Sending message type %d of %d bytes to %08x:%d",
             ahdr.type,
             len,
             ntohl(sin.sin_addr.s_addr),
             ntohs(sin.sin_port));

        /* send the message */
        ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_trans_immediate_abort() */
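
/*
 * The abort packet assembled above is, in outline (a reading of the code,
 * not a protocol citation):
 *
 *      iov[0]: struct rxrpc_header copied from the offending packet, with
 *              type = RXRPC_PACKET_TYPE_ABORT, seq = 0, serial = 1, and the
 *              CLIENT_INITIATED flag inverted so the abort flows back the
 *              other way;
 *      iov[1]: a 32-bit abort code, htonl(-error).
 *
 * Both pieces are sent back to the source address and port of the packet
 * that provoked the abort.
 */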
  530. /*****************************************************************************/
  531. /*
  532. * receive an ICMP error report and percolate it to all connections
  533. * heading to the affected host or port
  534. */
  535. static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
  536. {
  537. struct rxrpc_connection *conn;
  538. struct sockaddr_in sin;
  539. struct rxrpc_peer *peer;
  540. struct list_head connq, *_p;
  541. struct errormsg emsg;
  542. struct msghdr msg;
  543. __be16 port;
  544. int local, err;
  545. _enter("%p", trans);
  546. for (;;) {
  547. trans->error_rcvd = 0;
  548. /* try and receive an error message */
  549. msg.msg_name = &sin;
  550. msg.msg_namelen = sizeof(sin);
  551. msg.msg_control = &emsg;
  552. msg.msg_controllen = sizeof(emsg);
  553. msg.msg_flags = 0;
  554. err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
  555. MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
  556. if (err == -EAGAIN) {
  557. _leave("");
  558. return;
  559. }
  560. if (err < 0) {
  561. printk("%s: unable to recv an error report: %d\n",
  562. __FUNCTION__, err);
  563. _leave("");
  564. return;
  565. }
  566. msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;
  567. if (msg.msg_controllen < sizeof(emsg.cmsg) ||
  568. msg.msg_namelen < sizeof(sin)) {
  569. printk("%s: short control message"
  570. " (nlen=%u clen=%Zu fl=%x)\n",
  571. __FUNCTION__,
  572. msg.msg_namelen,
  573. msg.msg_controllen,
  574. msg.msg_flags);
  575. continue;
  576. }
  577. _net("Rx Received control message"
  578. " { len=%Zu level=%u type=%u }",
  579. emsg.cmsg.cmsg_len,
  580. emsg.cmsg.cmsg_level,
  581. emsg.cmsg.cmsg_type);
  582. if (sin.sin_family != AF_INET) {
  583. printk("Rx Ignoring error report with non-INET address"
  584. " (fam=%u)",
  585. sin.sin_family);
  586. continue;
  587. }
  588. _net("Rx Received message pertaining to host addr=%x port=%hu",
  589. ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
  590. if (emsg.cmsg.cmsg_level != SOL_IP ||
  591. emsg.cmsg.cmsg_type != IP_RECVERR) {
  592. printk("Rx Ignoring unknown error report"
  593. " { level=%u type=%u }",
  594. emsg.cmsg.cmsg_level,
  595. emsg.cmsg.cmsg_type);
  596. continue;
  597. }
  598. if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
  599. printk("%s: short error message (%Zu)\n",
  600. __FUNCTION__, msg.msg_controllen);
  601. _leave("");
  602. return;
  603. }
  604. port = sin.sin_port;
  605. switch (emsg.ee.ee_origin) {
  606. case SO_EE_ORIGIN_ICMP:
  607. local = 0;
  608. switch (emsg.ee.ee_type) {
  609. case ICMP_DEST_UNREACH:
  610. switch (emsg.ee.ee_code) {
  611. case ICMP_NET_UNREACH:
  612. _net("Rx Received ICMP Network Unreachable");
  613. port = 0;
  614. err = -ENETUNREACH;
  615. break;
  616. case ICMP_HOST_UNREACH:
  617. _net("Rx Received ICMP Host Unreachable");
  618. port = 0;
  619. err = -EHOSTUNREACH;
  620. break;
  621. case ICMP_PORT_UNREACH:
  622. _net("Rx Received ICMP Port Unreachable");
  623. err = -ECONNREFUSED;
  624. break;
  625. case ICMP_NET_UNKNOWN:
  626. _net("Rx Received ICMP Unknown Network");
  627. port = 0;
  628. err = -ENETUNREACH;
  629. break;
  630. case ICMP_HOST_UNKNOWN:
  631. _net("Rx Received ICMP Unknown Host");
  632. port = 0;
  633. err = -EHOSTUNREACH;
  634. break;
  635. default:
  636. _net("Rx Received ICMP DestUnreach { code=%u }",
  637. emsg.ee.ee_code);
  638. err = emsg.ee.ee_errno;
  639. break;
  640. }
  641. break;
  642. case ICMP_TIME_EXCEEDED:
  643. _net("Rx Received ICMP TTL Exceeded");
  644. err = emsg.ee.ee_errno;
  645. break;
  646. default:
  647. _proto("Rx Received ICMP error { type=%u code=%u }",
  648. emsg.ee.ee_type, emsg.ee.ee_code);
  649. err = emsg.ee.ee_errno;
  650. break;
  651. }
  652. break;
  653. case SO_EE_ORIGIN_LOCAL:
  654. _proto("Rx Received local error { error=%d }",
  655. emsg.ee.ee_errno);
  656. local = 1;
  657. err = emsg.ee.ee_errno;
  658. break;
  659. case SO_EE_ORIGIN_NONE:
  660. case SO_EE_ORIGIN_ICMP6:
  661. default:
  662. _proto("Rx Received error report { orig=%u }",
  663. emsg.ee.ee_origin);
  664. local = 0;
  665. err = emsg.ee.ee_errno;
  666. break;
  667. }
  668. /* find all the connections between this transport and the
  669. * affected destination */
  670. INIT_LIST_HEAD(&connq);
  671. if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
  672. &peer) == 0) {
  673. read_lock(&peer->conn_lock);
  674. list_for_each(_p, &peer->conn_active) {
  675. conn = list_entry(_p, struct rxrpc_connection,
  676. link);
  677. if (port && conn->addr.sin_port != port)
  678. continue;
  679. if (!list_empty(&conn->err_link))
  680. continue;
  681. rxrpc_get_connection(conn);
  682. list_add_tail(&conn->err_link, &connq);
  683. }
  684. read_unlock(&peer->conn_lock);
  685. /* service all those connections */
  686. while (!list_empty(&connq)) {
  687. conn = list_entry(connq.next,
  688. struct rxrpc_connection,
  689. err_link);
  690. list_del(&conn->err_link);
  691. rxrpc_conn_handle_error(conn, local, err);
  692. rxrpc_put_connection(conn);
  693. }
  694. rxrpc_put_peer(peer);
  695. }
  696. }
  697. _leave("");
  698. return;
  699. } /* end rxrpc_trans_receive_error_report() */