// SPDX-License-Identifier: GPL-2.0-only
/*
 * File: pep.c
 *
 * Phonet pipe protocol end point socket
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Author: Rémi Denis-Courmont
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/ioctls.h>

#include <linux/phonet.h>
#include <linux/module.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/gprs.h>

/* sk_state values:
 * TCP_CLOSE		sock not in use yet
 * TCP_CLOSE_WAIT	disconnected pipe
 * TCP_LISTEN		listening pipe endpoint
 * TCP_SYN_RECV		connected pipe in disabled state
 * TCP_ESTABLISHED	connected pipe in enabled state
 *
 * pep_sock locking:
 *  - sk_state, hlist: sock lock needed
 *  - listener: read only
 *  - pipe_handle: read only
 */

#define CREDITS_MAX	10
#define CREDITS_THR	7

#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */

/* Get the next TLV sub-block. */
static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
				 void *buf)
{
	void *data = NULL;
	struct {
		u8 sb_type;
		u8 sb_len;
	} *ph, h;
	int buflen = *plen;

	ph = skb_header_pointer(skb, 0, 2, &h);
	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
		return NULL;
	ph->sb_len -= 2;
	*ptype = ph->sb_type;
	*plen = ph->sb_len;

	if (buflen > ph->sb_len)
		buflen = ph->sb_len;
	data = skb_header_pointer(skb, 2, buflen, buf);
	__skb_pull(skb, 2 + ph->sb_len);
	return data;
}

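/* Allocate an outgoing skb: reserve room for the Phonet pipe headers,
 * copy in the payload and push an (as yet unfilled) pnpipehdr. */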
static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
				     int len, gfp_t priority)
{
	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);

	if (!skb)
		return NULL;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, payload, len);
	__skb_push(skb, sizeof(struct pnpipehdr));
	skb_reset_transport_header(skb);
	return skb;
}

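/* Send a response to a pipe-service request, echoing the transaction ID
 * and pipe handle of @oskb back to its originator. */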
static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
		     const void *data, int len, gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct pnpipehdr *ph;
	struct sk_buff *skb;
	struct sockaddr_pn peer;

	skb = pep_alloc_skb(sk, data, len, priority);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
	ph->pipe_handle = oph->pipe_handle;
	ph->error_code = code;

	pn_skb_get_src_sockaddr(oskb, &peer);
	return pn_skb_send(sk, skb, &peer);
}

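/* Send an unsolicited indication (transaction ID 0) on this pipe. */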
static int pep_indicate(struct sock *sk, u8 id, u8 code,
			const void *data, int len, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = pep_alloc_skb(sk, data, len, priority);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = id;
	ph->pipe_handle = pn->pipe_handle;
	ph->error_code = code;
	return pn_skb_send(sk, skb, NULL);
}

#define PAD 0x00

static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
				const void *data, int len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = id; /* whatever */
	ph->message_id = id;
	ph->pipe_handle = pn->pipe_handle;
	ph->error_code = code;
	return pn_skb_send(sk, skb, NULL);
}

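/* Tell the other end that the pipe is created, advertising the negotiated
 * flow control methods in a single sub-block. */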
static int pipe_handler_send_created_ind(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	u8 data[4] = {
		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
		pn->tx_fc, pn->rx_fc,
	};

	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
			    data, 4, GFP_ATOMIC);
}

static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
{
	static const u8 data[20] = {
		PAD, PAD, PAD, 2 /* sub-blocks */,
		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
	};

	might_sleep();
	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
			 GFP_KERNEL);
}

static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
			   gfp_t priority)
{
	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };

	WARN_ON(code == PN_PIPE_NO_ERROR);
	return pep_reply(sk, skb, code, data, sizeof(data), priority);
}

/* Control requests are not sent by the pipe service and have a specific
 * message format. */
static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
			     gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct sk_buff *skb;
	struct pnpipehdr *ph;
	struct sockaddr_pn dst;
	u8 data[4] = {
		oph->pep_type, /* PEP type */
		code, /* error code, at an unusual offset */
		PAD, PAD,
	};

	skb = pep_alloc_skb(sk, data, 4, priority);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = PNS_PEP_CTRL_RESP;
	ph->pipe_handle = oph->pipe_handle;
	ph->data0 = oph->data[0]; /* CTRL id */

	pn_skb_get_src_sockaddr(oskb, &dst);
	return pn_skb_send(sk, skb, &dst);
}

static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
{
	u8 data[4] = { type, PAD, PAD, status };

	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
			    data, 4, priority);
}

/* Send our RX flow control information to the sender.
 * Socket must be locked. */
static void pipe_grant_credits(struct sock *sk, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);

	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	switch (pn->rx_fc) {
	case PN_LEGACY_FLOW_CONTROL: /* TODO */
		break;
	case PN_ONE_CREDIT_FLOW_CONTROL:
		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
				    PEP_IND_READY, priority) == 0)
			pn->rx_credits = 1;
		break;
	case PN_MULTI_CREDIT_FLOW_CONTROL:
		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
			break;
		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
				    CREDITS_MAX - pn->rx_credits,
				    priority) == 0)
			pn->rx_credits = CREDITS_MAX;
		break;
	}
}

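/* Process a peer status indication (flow control state or credit grant),
 * waking up writers if transmission becomes possible again. */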
static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	int wake = 0;

	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
		net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
				    (unsigned int)hdr->pep_type);
		return -EOPNOTSUPP;
	}

	switch (hdr->data[0]) {
	case PN_PEP_IND_FLOW_CONTROL:
		switch (pn->tx_fc) {
		case PN_LEGACY_FLOW_CONTROL:
			switch (hdr->data[3]) {
			case PEP_IND_BUSY:
				atomic_set(&pn->tx_credits, 0);
				break;
			case PEP_IND_READY:
				atomic_set(&pn->tx_credits, wake = 1);
				break;
			}
			break;
		case PN_ONE_CREDIT_FLOW_CONTROL:
			if (hdr->data[3] == PEP_IND_READY)
				atomic_set(&pn->tx_credits, wake = 1);
			break;
		}
		break;

	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
			break;
		atomic_add(wake = hdr->data[3], &pn->tx_credits);
		break;

	default:
		net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
				    (unsigned int)hdr->data[0]);
		return -EOPNOTSUPP;
	}
	if (wake)
		sk->sk_write_space(sk);
	return 0;
}

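/* Parse the sub-blocks of a PIPE_CREATED or PIPE_REDIRECTED indication to
 * pick up the negotiated flow control methods for both directions. */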
static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	u8 n_sb = hdr->data0;

	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	__skb_pull(skb, sizeof(*hdr));
	while (n_sb > 0) {
		u8 type, buf[2], len = sizeof(buf);
		u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;

		switch (type) {
		case PN_PIPE_SB_NEGOTIATED_FC:
			if (len < 2 || (data[0] | data[1]) > 3)
				break;
			pn->tx_fc = data[0] & 3;
			pn->rx_fc = data[1] & 3;
			break;
		}
		n_sb--;
	}
	return 0;
}

/* Queue an skb to a connected sock.
 * Socket lock must be held. */
static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	struct sk_buff_head *queue;
	int err = 0;

	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		sk->sk_state = TCP_CLOSE_WAIT;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case PNS_PEP_ENABLE_REQ:
		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
		switch (hdr->state_after_reset) {
		case PN_PIPE_DISABLE:
			pn->init_enable = 0;
			break;
		case PN_PIPE_ENABLE:
			pn->init_enable = 1;
			break;
		default: /* not allowed to send an error here!? */
			err = -EINVAL;
			goto out;
		}
		fallthrough;
	case PNS_PEP_DISABLE_REQ:
		atomic_set(&pn->tx_credits, 0);
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
			atomic_inc(&sk->sk_drops);
			break;
		}
		__skb_pull(skb, 4);
		queue = &pn->ctrlreq_queue;
		goto queue;

	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);
		fallthrough;
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return NET_RX_SUCCESS;
			err = -ENOBUFS;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = -ENOBUFS;
			break;
		}
		pn->rx_credits--;
		queue = &sk->sk_receive_queue;
		goto queue;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;

	case PNS_PIPE_REDIRECTED_IND:
		err = pipe_rcv_created(sk, skb);
		break;

	case PNS_PIPE_CREATED_IND:
		err = pipe_rcv_created(sk, skb);
		if (err)
			break;
		fallthrough;
	case PNS_PIPE_RESET_IND:
		if (!pn->init_enable)
			break;
		fallthrough;
	case PNS_PIPE_ENABLED_IND:
		if (!pn_flow_safe(pn->tx_fc)) {
			atomic_set(&pn->tx_credits, 1);
			sk->sk_write_space(sk);
		}
		if (sk->sk_state == TCP_ESTABLISHED)
			break; /* Nothing to do */
		sk->sk_state = TCP_ESTABLISHED;
		pipe_grant_credits(sk, GFP_ATOMIC);
		break;

	case PNS_PIPE_DISABLED_IND:
		sk->sk_state = TCP_SYN_RECV;
		pn->rx_credits = 0;
		break;

	default:
		net_dbg_ratelimited("Phonet unknown PEP message: %u\n",
				    hdr->message_id);
		err = -EINVAL;
	}
out:
	kfree_skb(skb);
	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;

queue:
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return NET_RX_SUCCESS;
}

/* Destroy connected sock. */
static void pipe_destruct(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&pn->ctrlreq_queue);
}

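/* Pick the highest-valued flow control method among those offered. */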
static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
{
	unsigned int i;
	u8 final_fc = PN_NO_FLOW_CONTROL;

	for (i = 0; i < n; i++) {
		u8 fc = fcs[i];

		if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
			final_fc = fc;
	}
	return final_fc;
}

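/* Handle the response to our PNS_PEP_CONNECT_REQ: negotiate flow control
 * from the peer's sub-blocks, then send a PIPE_CREATED indication. */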
static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	u8 n_sb;

	if (!pskb_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	if (hdr->error_code != PN_PIPE_NO_ERROR)
		return -ECONNREFUSED;

	/* Parse sub-blocks */
	n_sb = hdr->data[3];
	while (n_sb > 0) {
		u8 type, buf[6], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;

		switch (type) {
		case PN_PIPE_SB_REQUIRED_FC_TX:
			if (len < 2 || len < data[0])
				break;
			pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
			break;

		case PN_PIPE_SB_PREFERRED_FC_RX:
			if (len < 2 || len < data[0])
				break;
			pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
			break;
		}
		n_sb--;
	}

	return pipe_handler_send_created_ind(sk);
}

static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pnpipehdr *hdr = pnp_hdr(skb);

	if (hdr->error_code != PN_PIPE_NO_ERROR)
		return -ECONNREFUSED;

	return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
			    NULL, 0, GFP_ATOMIC);
}

static void pipe_start_flow_control(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	if (!pn_flow_safe(pn->tx_fc)) {
		atomic_set(&pn->tx_credits, 1);
		sk->sk_write_space(sk);
	}
	pipe_grant_credits(sk, GFP_ATOMIC);
}

/* Queue an skb to an actively connected sock.
 * Socket lock must be held. */
static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	int err = NET_RX_SUCCESS;

	switch (hdr->message_id) {
	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);
		fallthrough;
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return NET_RX_SUCCESS;
			err = NET_RX_DROP;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = NET_RX_DROP;
			break;
		}
		pn->rx_credits--;
		skb->dev = NULL;
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk);
		return NET_RX_SUCCESS;

	case PNS_PEP_CONNECT_RESP:
		if (sk->sk_state != TCP_SYN_SENT)
			break;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		if (pep_connresp_rcv(sk, skb)) {
			sk->sk_state = TCP_CLOSE_WAIT;
			break;
		}
		if (pn->init_enable == PN_PIPE_DISABLE)
			sk->sk_state = TCP_SYN_RECV;
		else {
			sk->sk_state = TCP_ESTABLISHED;
			pipe_start_flow_control(sk);
		}
		break;

	case PNS_PEP_ENABLE_RESP:
		if (sk->sk_state != TCP_SYN_SENT)
			break;

		if (pep_enableresp_rcv(sk, skb)) {
			sk->sk_state = TCP_CLOSE_WAIT;
			break;
		}

		sk->sk_state = TCP_ESTABLISHED;
		pipe_start_flow_control(sk);
		break;

	case PNS_PEP_DISCONNECT_RESP:
		/* sock should already be dead, nothing to do */
		break;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;
	}
	kfree_skb(skb);
	return err;
}

/* Listening sock must be locked */
static struct sock *pep_find_pipe(const struct hlist_head *hlist,
				  const struct sockaddr_pn *dst,
				  u8 pipe_handle)
{
	struct sock *sknode;
	u16 dobj = pn_sockaddr_get_object(dst);

	sk_for_each(sknode, hlist) {
		struct pep_sock *pnnode = pep_sk(sknode);

		/* Ports match, but addresses might not: */
		if (pnnode->pn_sk.sobject != dobj)
			continue;
		if (pnnode->pipe_handle != pipe_handle)
			continue;
		if (sknode->sk_state == TCP_CLOSE_WAIT)
			continue;

		sock_hold(sknode);
		return sknode;
	}
	return NULL;
}

/*
 * Deliver an skb to a listening sock.
 * Socket lock must be held.
 * We then queue the skb to the right connected sock (if any).
 */
static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *sknode;
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst;
	u8 pipe_handle;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
		goto drop;

	pn_skb_get_dst_sockaddr(skb, &dst);

	/* Look for an existing pipe handle */
	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
	if (sknode)
		return sk_receive_skb(sknode, skb, 1);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
					GFP_ATOMIC);
			break;
		}
		skb_queue_head(&sk->sk_receive_queue, skb);
		sk_acceptq_added(sk);
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk);
		return NET_RX_SUCCESS;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
	case PNS_PEP_ENABLE_REQ:
	case PNS_PEP_DISABLE_REQ:
		/* invalid handle is not even allowed here! */
		break;

	default:
		if ((1 << sk->sk_state)
				& ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
			/* actively connected socket */
			return pipe_handler_do_rcv(sk, skb);
	}
drop:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int pipe_do_remove(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PIPE_REMOVE_REQ;
	ph->pipe_handle = pn->pipe_handle;
	ph->data0 = PAD;
	return pn_skb_send(sk, skb, NULL);
}

/* associated socket ceases to exist */
static void pep_sock_close(struct sock *sk, long timeout)
{
	struct pep_sock *pn = pep_sk(sk);
	int ifindex = 0;

	sock_hold(sk); /* keep a reference after sk_common_release() */
	sk_common_release(sk);

	lock_sock(sk);
	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
		if (sk->sk_backlog_rcv == pipe_do_rcv)
			/* Forcefully remove dangling Phonet pipe */
			pipe_do_remove(sk);
		else
			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
					     NULL, 0);
	}
	sk->sk_state = TCP_CLOSE;

	ifindex = pn->ifindex;
	pn->ifindex = 0;
	release_sock(sk);

	if (ifindex)
		gprs_detach(sk);
	sock_put(sk);
}

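/* Accept a pending pipe connection on a listening socket: validate the
 * queued PNS_PEP_CONNECT_REQ, create the child socket and send the reply. */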
static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
				    bool kern)
{
	struct pep_sock *pn = pep_sk(sk), *newpn;
	struct sock *newsk = NULL;
	struct sk_buff *skb;
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst, src;
	int err;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;
	u8 aligned = 0;

	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
	if (!skb)
		return NULL;

	lock_sock(sk);
	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto drop;
	}
	sk_acceptq_removed(sk);

	err = -EPROTO;
	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		goto drop;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
				GFP_KERNEL);
		goto drop;
	}
	peer_type = hdr->other_pep_type << 8;

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[3];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			goto drop;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				goto drop;
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		case PN_PIPE_SB_ALIGNED_DATA:
			aligned = data[0] != 0;
			break;
		}
		n_sb--;
	}

	/* Check for duplicate pipe handle */
	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
	if (unlikely(newsk)) {
		__sock_put(newsk);
		newsk = NULL;
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
		goto drop;
	}

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
			 kern);
	if (!newsk) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
		err = -ENOBUFS;
		goto drop;
	}

	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	newpn = pep_sk(newsk);
	pn_skb_get_dst_sockaddr(skb, &dst);
	pn_skb_get_src_sockaddr(skb, &src);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
	sock_hold(sk);
	newpn->listener = sk;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->ifindex = 0;
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;
	newpn->aligned = aligned;

	err = pep_accept_conn(newsk, skb);
	if (err) {
		__sock_put(sk);
		sock_put(newsk);
		newsk = NULL;
		goto drop;
	}
	sk_add_node(newsk, &pn->hlist);
drop:
	release_sock(sk);
	kfree_skb(skb);
	*errp = err;
	return newsk;
}

static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
{
	struct pep_sock *pn = pep_sk(sk);
	int err;
	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };

	if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
		pn->pipe_handle = 1; /* anything but INVALID_HANDLE */

	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
				   pn->init_enable, data, 4);
	if (err) {
		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
		return err;
	}

	sk->sk_state = TCP_SYN_SENT;

	return 0;
}

static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
{
	int err;

	err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
				   NULL, 0);
	if (err)
		return err;

	sk->sk_state = TCP_SYN_SENT;

	return 0;
}

static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct pep_sock *pn = pep_sk(sk);
	int answ;
	int ret = -ENOIOCTLCMD;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN) {
			ret = -EINVAL;
			break;
		}

		lock_sock(sk);
		if (sock_flag(sk, SOCK_URGINLINE) &&
		    !skb_queue_empty(&pn->ctrlreq_queue))
			answ = skb_peek(&pn->ctrlreq_queue)->len;
		else if (!skb_queue_empty(&sk->sk_receive_queue))
			answ = skb_peek(&sk->sk_receive_queue)->len;
		else
			answ = 0;
		release_sock(sk);
		ret = put_user(answ, (int __user *)arg);
		break;

	case SIOCPNENABLEPIPE:
		lock_sock(sk);
		if (sk->sk_state == TCP_SYN_SENT)
			ret = -EBUSY;
		else if (sk->sk_state == TCP_ESTABLISHED)
			ret = -EISCONN;
		else if (!pn->pn_sk.sobject)
			ret = -EADDRNOTAVAIL;
		else
			ret = pep_sock_enable(sk, NULL, 0);
		release_sock(sk);
		break;
	}

	return ret;
}

static int pep_init(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	sk->sk_destruct = pipe_destruct;
	INIT_HLIST_HEAD(&pn->hlist);
	pn->listener = NULL;
	skb_queue_head_init(&pn->ctrlreq_queue);
	atomic_set(&pn->tx_credits, 0);
	pn->ifindex = 0;
	pn->peer_type = 0;
	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
	pn->rx_credits = 0;
	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	pn->init_enable = 1;
	pn->aligned = 0;
	return 0;
}

static int pep_setsockopt(struct sock *sk, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int val = 0, err = 0;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (optlen >= sizeof(int)) {
		if (copy_from_sockptr(&val, optval, sizeof(int)))
			return -EFAULT;
	}

	lock_sock(sk);
	switch (optname) {
	case PNPIPE_ENCAP:
		if (val && val != PNPIPE_ENCAP_IP) {
			err = -EINVAL;
			break;
		}
		if (!pn->ifindex == !val)
			break; /* Nothing to do! */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (val) {
			release_sock(sk);
			err = gprs_attach(sk);
			if (err > 0) {
				pn->ifindex = err;
				err = 0;
			}
		} else {
			pn->ifindex = 0;
			release_sock(sk);
			gprs_detach(sk);
			err = 0;
		}
		goto out_norel;

	case PNPIPE_HANDLE:
		if ((sk->sk_state == TCP_CLOSE) &&
		    (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
			pn->pipe_handle = val;
		else
			err = -EINVAL;
		break;

	case PNPIPE_INITSTATE:
		pn->init_enable = !!val;
		break;

	default:
		err = -ENOPROTOOPT;
	}
	release_sock(sk);

out_norel:
	return err;
}

static int pep_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int len, val;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case PNPIPE_ENCAP:
		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
		break;

	case PNPIPE_IFINDEX:
		val = pn->ifindex;
		break;

	case PNPIPE_HANDLE:
		val = pn->pipe_handle;
		if (val == PN_PIPE_INVALID_HANDLE)
			return -EINVAL;
		break;

	case PNPIPE_INITSTATE:
		val = pn->init_enable;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(val, (int __user *) optval))
		return -EFAULT;
	return 0;
}

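/* Transmit one data skb on the pipe, consuming a TX credit when credit
 * based flow control is in use (and refunding it if the send fails). */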
static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	int err;

	if (pn_flow_safe(pn->tx_fc) &&
	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_push(skb, 3 + pn->aligned);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	if (pn->aligned) {
		ph->message_id = PNS_PIPE_ALIGNED_DATA;
		ph->data0 = 0; /* padding */
	} else
		ph->message_id = PNS_PIPE_DATA;
	ph->pipe_handle = pn->pipe_handle;

	err = pn_skb_send(sk, skb, NULL);
	if (err && pn_flow_safe(pn->tx_fc))
		atomic_inc(&pn->tx_credits);
	return err;
}

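/* Send one message: wait for the pipe to reach the enabled state and for
 * TX credits, then transmit the payload as a single pipe data message. */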
static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sk_buff *skb;
	long timeo;
	int flags = msg->msg_flags;
	int err, done;

	if (len > USHRT_MAX)
		return -EMSGSIZE;

	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
				MSG_CMSG_COMPAT)) ||
			!(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
				  flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
	err = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (err < 0)
		goto outfree;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
		err = -ENOTCONN;
		goto out;
	}
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Wait until the pipe gets to enabled state */
disabled:
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		if (sk->sk_state == TCP_CLOSE_WAIT) {
			err = -ECONNRESET;
			goto out;
		}
	}
	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	/* Wait until flow control allows TX */
	done = atomic_read(&pn->tx_credits);
	while (!done) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		if (!timeo) {
			err = -EAGAIN;
			goto out;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits), &wait);
		remove_wait_queue(sk_sleep(sk), &wait);

		if (sk->sk_state != TCP_ESTABLISHED)
			goto disabled;
	}

	err = pipe_skb_send(sk, skb);
	if (err >= 0)
		err = len; /* success! */
	skb = NULL;
out:
	release_sock(sk);
outfree:
	kfree_skb(skb);
	return err;
}

int pep_writeable(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	return atomic_read(&pn->tx_credits);
}

int pep_write(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *rskb, *fs;
	int flen = 0;

	if (pep_sk(sk)->aligned)
		return pipe_skb_send(sk, skb);

	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
	if (!rskb) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	skb_shinfo(rskb)->frag_list = skb;
	rskb->len += skb->len;
	rskb->data_len += rskb->len;
	rskb->truesize += rskb->len;

	/* Avoid nested fragments */
	skb_walk_frags(skb, fs)
		flen += fs->len;
	skb->next = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);
	skb->len -= flen;
	skb->data_len -= flen;
	skb->truesize -= flen;

	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
	return pipe_skb_send(sk, rskb);
}

struct sk_buff *pep_read(struct sock *sk)
{
	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);

	if (sk->sk_state == TCP_ESTABLISHED)
		pipe_grant_credits(sk, GFP_ATOMIC);
	return skb;
}

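/* Receive one pipe message. With MSG_OOB (or SOCK_URGINLINE), pending
 * control requests are dequeued and acknowledged first. */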
static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		       int noblock, int flags, int *addr_len)
{
	struct sk_buff *skb;
	int err;

	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
		return -ENOTCONN;

	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
		/* Dequeue and acknowledge control request */
		struct pep_sock *pn = pep_sk(sk);

		if (flags & MSG_PEEK)
			return -EOPNOTSUPP;
		skb = skb_dequeue(&pn->ctrlreq_queue);
		if (skb) {
			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
					  GFP_KERNEL);
			msg->msg_flags |= MSG_OOB;
			goto copy;
		}
		if (flags & MSG_OOB)
			return -EINVAL;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	lock_sock(sk);
	if (skb == NULL) {
		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
			err = -ECONNRESET;
		release_sock(sk);
		return err;
	}

	if (sk->sk_state == TCP_ESTABLISHED)
		pipe_grant_credits(sk, GFP_KERNEL);
	release_sock(sk);
copy:
	msg->msg_flags |= MSG_EOR;
	if (skb->len > len)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = skb->len;

	err = skb_copy_datagram_msg(skb, 0, msg, len);
	if (!err)
		err = (flags & MSG_TRUNC) ? skb->len : len;

	skb_free_datagram(sk, skb);
	return err;
}

static void pep_sock_unhash(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *skparent = NULL;

	lock_sock(sk);

	if (pn->listener != NULL) {
		skparent = pn->listener;
		pn->listener = NULL;
		release_sock(sk);

		pn = pep_sk(skparent);
		lock_sock(skparent);
		sk_del_node_init(sk);
		sk = skparent;
	}

	/* Unhash a listening sock only when it is closed
	 * and all of its active connected pipes are closed. */
	if (hlist_empty(&pn->hlist))
		pn_sock_unhash(&pn->pn_sk.sk);
	release_sock(sk);

	if (skparent)
		sock_put(skparent);
}

static struct proto pep_proto = {
	.close		= pep_sock_close,
	.accept		= pep_sock_accept,
	.connect	= pep_sock_connect,
	.ioctl		= pep_ioctl,
	.init		= pep_init,
	.setsockopt	= pep_setsockopt,
	.getsockopt	= pep_getsockopt,
	.sendmsg	= pep_sendmsg,
	.recvmsg	= pep_recvmsg,
	.backlog_rcv	= pep_do_rcv,
	.hash		= pn_sock_hash,
	.unhash		= pep_sock_unhash,
	.get_port	= pn_sock_get_port,
	.obj_size	= sizeof(struct pep_sock),
	.owner		= THIS_MODULE,
	.name		= "PNPIPE",
};

static const struct phonet_protocol pep_pn_proto = {
	.ops		= &phonet_stream_ops,
	.prot		= &pep_proto,
	.sock_type	= SOCK_SEQPACKET,
};

static int __init pep_register(void)
{
	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
}

static void __exit pep_unregister(void)
{
	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
}

module_init(pep_register);
module_exit(pep_unregister);
MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
MODULE_DESCRIPTION("Phonet pipe protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);