nr_out.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/netrom.h>

/*
 * This is where all NET/ROM frames pass, except for IP-over-NET/ROM which
 * cannot be fragmented in this manner.
 */
void nr_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char transport[NR_TRANSPORT_LEN];
	int err, frontlen, len;

	if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
		/* Save a copy of the Transport Header */
		skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN);
		skb_pull(skb, NR_TRANSPORT_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
				return;

			skb_reserve(skbn, frontlen);

			len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Transport Header */
			skb_push(skbn, NR_TRANSPORT_LEN);
			skb_copy_to_linear_data(skbn, transport,
						NR_TRANSPORT_LEN);

			if (skb->len > 0)
				skbn->data[4] |= NR_MORE_FLAG;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);	/* Throw it on the queue */
	}

	nr_kick(sk);
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
{
	struct nr_sock *nr = nr_sk(sk);

	if (skb == NULL)
		return;

	skb->data[2] = nr->vs;
	skb->data[3] = nr->vr;

	if (nr->condition & NR_COND_OWN_RX_BUSY)
		skb->data[4] |= NR_CHOKE_FLAG;

	nr_start_idletimer(sk);

	nr_transmit_buffer(sk, skb);
}

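/*
 * Answer a NAK condition by retransmitting the oldest unacknowledged
 * I frame: the head of the ack queue is cloned, stamped with the current
 * V(a) and V(r) (plus the CHOKE flag if our own receiver is busy) and
 * sent again; the pending-ACK condition is cleared and T1 is stopped.
 */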
void nr_send_nak_frame(struct sock *sk)
{
	struct sk_buff *skb, *skbn;
	struct nr_sock *nr = nr_sk(sk);

	if ((skb = skb_peek(&nr->ack_queue)) == NULL)
		return;

	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
		return;

	skbn->data[2] = nr->va;
	skbn->data[3] = nr->vr;

	if (nr->condition & NR_COND_OWN_RX_BUSY)
		skbn->data[4] |= NR_CHOKE_FLAG;

	nr_transmit_buffer(sk, skbn);

	nr->condition &= ~NR_COND_ACK_PENDING;
	nr->vl = nr->vr;

	nr_stop_t1timer(sk);
}

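/*
 * Push queued data out while the send window has room: each frame is
 * taken off the write queue, a clone is transmitted as an I frame and
 * the original is moved to the ack queue to await acknowledgement.
 * Does nothing unless the connection is in state 3 and the peer's
 * receiver is not busy.
 */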
void nr_kick(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);
	struct sk_buff *skb, *skbn;
	unsigned short start, end;

	if (nr->state != NR_STATE_3)
		return;

	if (nr->condition & NR_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

	start = (skb_peek(&nr->ack_queue) == NULL) ? nr->va : nr->vs;
	end   = (nr->va + nr->window) % NR_MODULUS;

	if (start == end)
		return;

	nr->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full.
	 */

	/*
	 * Dequeue the frame and copy it.
	 */
	skb = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		/*
		 * Transmit the frame copy.
		 */
		nr_send_iframe(sk, skbn);

		nr->vs = (nr->vs + 1) % NR_MODULUS;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&nr->ack_queue, skb);

	} while (nr->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	nr->vl = nr->vr;
	nr->condition &= ~NR_COND_ACK_PENDING;

	if (!nr_t1timer_running(sk))
		nr_start_t1timer(sk);
}

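/*
 * Prepend the NET/ROM network header (source callsign, destination
 * callsign and the TTL byte) and hand the frame to the routing code.
 * If no route can be found, the frame is freed and the connection is
 * dropped with ENETUNREACH.
 */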
void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
{
	struct nr_sock *nr = nr_sk(sk);
	unsigned char *dptr;

	/*
	 * Add the protocol byte and network header.
	 */
	dptr = skb_push(skb, NR_NETWORK_LEN);

	memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] &= ~AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	memcpy(dptr, &nr->dest_addr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] |= AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	*dptr++ = sysctl_netrom_network_ttl_initialiser;

	if (!nr_route_frame(skb, NULL)) {
		kfree_skb(skb);
		nr_disconnect(sk, ENETUNREACH);
	}
}

/*
 * The following routines are taken from page 170 of the 7th ARRL Computer
 * Networking Conference paper, as is the whole state machine.
 */

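/*
 * Begin connection establishment: clear the link conditions and the
 * retry counter, send a NET/ROM connect request, and run only the T1
 * timer while waiting for the reply.
 */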
void nr_establish_data_link(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	nr->condition = 0x00;
	nr->n2count = 0;

	nr_write_internal(sk, NR_CONNREQ);

	nr_stop_t2timer(sk);
	nr_stop_t4timer(sk);
	nr_stop_idletimer(sk);
	nr_start_t1timer(sk);
}

/*
 * Never send a NAK when we are CHOKEd.
 */
void nr_enquiry_response(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);
	int frametype = NR_INFOACK;

	if (nr->condition & NR_COND_OWN_RX_BUSY) {
		frametype |= NR_CHOKE_FLAG;
	} else {
		if (skb_peek(&nr->reseq_queue) != NULL)
			frametype |= NR_NAK_FLAG;
	}

	nr_write_internal(sk, frametype);

	nr->vl = nr->vr;
	nr->condition &= ~NR_COND_ACK_PENDING;
}

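/*
 * Process an acknowledgement of our I frames: frames covered by the
 * acknowledgement are removed from the ack queue; T1 is stopped and the
 * retry counter reset once everything outstanding has been acknowledged,
 * otherwise T1 is restarted when the acknowledgement moves V(a) forward.
 */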
void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
{
	struct nr_sock *nrom = nr_sk(sk);

	if (nrom->vs == nr) {
		nr_frames_acked(sk, nr);
		nr_stop_t1timer(sk);
		nrom->n2count = 0;
	} else {
		if (nrom->va != nr) {
			nr_frames_acked(sk, nr);
			nr_start_t1timer(sk);
		}
	}
}