lowcomms-tcp.c

/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * I don't see any problem with the recv thread executing the locking
 * code on behalf of remote processes as the locking code is
 * short, efficient and never waits.
 *
 */
#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"
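
/*
 * A circular-buffer descriptor for the per-connection receive page.
 * "base" is the start of the valid data, "len" the amount of valid
 * data, and "mask" is size-1. The size must be a power of two (it is
 * PAGE_CACHE_SIZE here), so wrap-around is a cheap AND rather than a
 * modulo; e.g. with a 4096-byte page, mask = 0xfff and cbuf_data()
 * returns (base + len) & 0xfff.
 */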
struct cbuf {
        unsigned int base;
        unsigned int len;
        unsigned int mask;
};

#define NODE_INCREMENT 32
static void cbuf_add(struct cbuf *cb, int n)
{
        cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
        return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
        cb->base = cb->len = 0;
        cb->mask = size - 1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
        cb->len -= n;
        cb->base += n;
        cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
        return cb->len == 0;
}

/* Maximum number of incoming messages to process before
   doing a cond_resched()
*/
#define MAX_RX_MSG_COUNT 25
struct connection {
        struct socket *sock;    /* NULL if not connected */
        uint32_t nodeid;        /* So we know who we are in the list */
        struct mutex sock_mutex;
        unsigned long flags;    /* bit 1,2 = We are on the read/write lists */
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_CONNECT_PENDING 3
#define CF_IS_OTHERCON 4
        struct list_head writequeue;  /* List of outgoing writequeue_entries */
        struct list_head listenlist;  /* List of allocated listening sockets */
        spinlock_t writequeue_lock;
        int (*rx_action) (struct connection *); /* What to do when active */
        struct page *rx_page;
        struct cbuf cb;
        int retries;
#define MAX_CONNECT_RETRIES 3
        struct connection *othercon;
        struct work_struct rwork; /* Receive workqueue */
        struct work_struct swork; /* Send workqueue */
};
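
/*
 * Map a struct sock back to its owning connection. The connect, accept
 * and listen paths store the connection pointer in sk_user_data, so the
 * socket callbacks below can recover their context from just the sock.
 */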
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
        struct list_head list;
        struct page *page;
        int offset;
        int len;
        int end;
        int users;
        struct connection *con;
};

static struct sockaddr_storage dlm_local_addr;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

/* An array of pointers to connections, indexed by NODEID */
static struct connection **connections;
static DECLARE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;
static int conn_array_size;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
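
/*
 * Look up (and optionally create) the connection for a nodeid. The
 * connections array is grown in NODE_INCREMENT steps as higher nodeids
 * are seen. A new connection is only allocated when a gfp mask is
 * supplied, so passing 0 for "allocation" turns this into a pure lookup.
 */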
static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
        struct connection *con = NULL;

        down(&connections_lock);
        if (nodeid >= conn_array_size) {
                int new_size = nodeid + NODE_INCREMENT;
                struct connection **new_conns;

                new_conns = kzalloc(sizeof(struct connection *) *
                                    new_size, allocation);
                if (!new_conns)
                        goto finish;

                memcpy(new_conns, connections,
                       sizeof(struct connection *) * conn_array_size);
                conn_array_size = new_size;
                kfree(connections);
                connections = new_conns;
        }

        con = connections[nodeid];
        if (con == NULL && allocation) {
                con = kmem_cache_zalloc(con_cache, allocation);
                if (!con)
                        goto finish;

                con->nodeid = nodeid;
                mutex_init(&con->sock_mutex);
                INIT_LIST_HEAD(&con->writequeue);
                spin_lock_init(&con->writequeue_lock);
                INIT_WORK(&con->swork, process_send_sockets);
                INIT_WORK(&con->rwork, process_recv_sockets);

                connections[nodeid] = con;
        }

finish:
        up(&connections_lock);
        return con;
}
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk, int count_unused)
{
        struct connection *con = sock2con(sk);

        if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
                queue_work(recv_workqueue, &con->rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
        struct connection *con = sock2con(sk);

        if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
                queue_work(send_workqueue, &con->swork);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
        if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
                queue_work(send_workqueue, &con->swork);
}

static void lowcomms_state_change(struct sock *sk)
{
        if (sk->sk_state == TCP_ESTABLISHED)
                lowcomms_write_space(sk);
}
/* Make a socket active */
static int add_sock(struct socket *sock, struct connection *con)
{
        con->sock = sock;

        /* Install a data_ready callback */
        con->sock->sk->sk_data_ready = lowcomms_data_ready;
        con->sock->sk->sk_write_space = lowcomms_write_space;
        con->sock->sk->sk_state_change = lowcomms_state_change;

        return 0;
}

/* Add the port number to an IPv6 or IPv4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
                          int *addr_len)
{
        saddr->ss_family = dlm_local_addr.ss_family;
        if (saddr->ss_family == AF_INET) {
                struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
                in4_addr->sin_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in);
        } else {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
                in6_addr->sin6_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in6);
        }
}
/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
        mutex_lock(&con->sock_mutex);

        if (con->sock) {
                sock_release(con->sock);
                con->sock = NULL;
        }
        if (con->othercon && and_other) {
                /* Will only re-enter once. */
                close_connection(con->othercon, false);
        }
        if (con->rx_page) {
                __free_page(con->rx_page);
                con->rx_page = NULL;
        }
        con->retries = 0;
        mutex_unlock(&con->sock_mutex);
}
/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
        int ret = 0;
        struct msghdr msg = {};
        struct kvec iov[2];
        unsigned len;
        int r;
        int call_again_soon = 0;
        int nvec;

        mutex_lock(&con->sock_mutex);

        if (con->sock == NULL) {
                ret = -EAGAIN;
                goto out_close;
        }

        if (con->rx_page == NULL) {
                /*
                 * This doesn't need to be atomic, but I think it should
                 * improve performance if it is.
                 */
                con->rx_page = alloc_page(GFP_ATOMIC);
                if (con->rx_page == NULL)
                        goto out_resched;
                cbuf_init(&con->cb, PAGE_CACHE_SIZE);
        }

        /*
         * iov[0] is the bit of the circular buffer between the current end
         * point (cb.base + cb.len) and the end of the buffer.
         */
        iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
        iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
        nvec = 1;

        /*
         * iov[1] is the bit of the circular buffer between the start of the
         * buffer and the start of the currently used section (cb.base)
         */
        if (cbuf_data(&con->cb) >= con->cb.base) {
                iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
                iov[1].iov_len = con->cb.base;
                iov[1].iov_base = page_address(con->rx_page);
                nvec = 2;
        }
        len = iov[0].iov_len + iov[1].iov_len;

        r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
                                 MSG_DONTWAIT | MSG_NOSIGNAL);

        /* Both -EAGAIN and EOF (ret == 0) land in out_close: the socket
           is left alone on -EAGAIN, and closed on a real error or EOF */
        if (ret <= 0)
                goto out_close;
        if (ret == len)
                call_again_soon = 1;
        cbuf_add(&con->cb, ret);
        ret = dlm_process_incoming_buffer(con->nodeid,
                                          page_address(con->rx_page),
                                          con->cb.base, con->cb.len,
                                          PAGE_CACHE_SIZE);
        if (ret == -EBADMSG) {
                printk(KERN_INFO "dlm: lowcomms: addr=%p, base=%u, len=%u, "
                       "iov_len=%u, iov_base[0]=%p, read=%d\n",
                       page_address(con->rx_page), con->cb.base, con->cb.len,
                       len, iov[0].iov_base, r);
        }
        if (ret < 0)
                goto out_close;
        cbuf_eat(&con->cb, ret);

        if (cbuf_empty(&con->cb) && !call_again_soon) {
                __free_page(con->rx_page);
                con->rx_page = NULL;
        }

        if (call_again_soon)
                goto out_resched;
        mutex_unlock(&con->sock_mutex);
        return 0;

out_resched:
        if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
                queue_work(recv_workqueue, &con->rwork);
        mutex_unlock(&con->sock_mutex);
        return -EAGAIN;

out_close:
        mutex_unlock(&con->sock_mutex);
        if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
                close_connection(con, false);
                /* Reconnect when there is something to send */
        }
        /* Don't return success if we really got EOF */
        if (ret == 0)
                ret = -EAGAIN;

        return ret;
}
/* Listening socket is busy, accept a connection */
static int accept_from_sock(struct connection *con)
{
        int result;
        struct sockaddr_storage peeraddr;
        struct socket *newsock;
        int len;
        int nodeid;
        struct connection *newcon;
        struct connection *addcon;

        memset(&peeraddr, 0, sizeof(peeraddr));
        result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &newsock);
        if (result < 0)
                return -ENOMEM;

        mutex_lock_nested(&con->sock_mutex, 0);

        result = -ENOTCONN;
        if (con->sock == NULL)
                goto accept_err;

        newsock->type = con->sock->type;
        newsock->ops = con->sock->ops;

        result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
        if (result < 0)
                goto accept_err;

        /* Get the connected socket's peer */
        memset(&peeraddr, 0, sizeof(peeraddr));
        if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
                                  &len, 2)) {
                result = -ECONNABORTED;
                goto accept_err;
        }

        /* Get the new node's NODEID */
        make_sockaddr(&peeraddr, 0, &len);
        if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
                printk("dlm: connect from non cluster node\n");
                sock_release(newsock);
                mutex_unlock(&con->sock_mutex);
                return -1;
        }

        log_print("got connection from %d", nodeid);

        /*  Check to see if we already have a connection to this node. This
         *  could happen if the two nodes initiate a connection at roughly
         *  the same time and the connections cross on the wire.
         * TEMPORARY FIX:
         *  In this case we store the incoming one in "othercon"
         */
        newcon = nodeid2con(nodeid, GFP_KERNEL);
        if (!newcon) {
                result = -ENOMEM;
                goto accept_err;
        }
        mutex_lock_nested(&newcon->sock_mutex, 1);
        if (newcon->sock) {
                struct connection *othercon = newcon->othercon;

                if (!othercon) {
                        othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
                        if (!othercon) {
                                printk("dlm: failed to allocate incoming socket\n");
                                mutex_unlock(&newcon->sock_mutex);
                                result = -ENOMEM;
                                goto accept_err;
                        }
                        othercon->nodeid = nodeid;
                        othercon->rx_action = receive_from_sock;
                        mutex_init(&othercon->sock_mutex);
                        INIT_WORK(&othercon->swork, process_send_sockets);
                        INIT_WORK(&othercon->rwork, process_recv_sockets);
                        set_bit(CF_IS_OTHERCON, &othercon->flags);
                        newcon->othercon = othercon;
                }
                othercon->sock = newsock;
                newsock->sk->sk_user_data = othercon;
                add_sock(newsock, othercon);
                addcon = othercon;
        }
        else {
                newsock->sk->sk_user_data = newcon;
                newcon->rx_action = receive_from_sock;
                add_sock(newsock, newcon);
                addcon = newcon;
        }

        mutex_unlock(&newcon->sock_mutex);

        /*
         * Add it to the active queue in case we got data
         * between processing the accept and adding the socket
         * to the read_sockets list
         */
        if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
                queue_work(recv_workqueue, &addcon->rwork);
        mutex_unlock(&con->sock_mutex);

        return 0;

accept_err:
        mutex_unlock(&con->sock_mutex);
        sock_release(newsock);

        if (result != -EAGAIN)
                printk("dlm: error accepting connection from node: %d\n",
                       result);
        return result;
}
/* Connect a new socket to its peer */
static void connect_to_sock(struct connection *con)
{
        int result = -EHOSTUNREACH;
        struct sockaddr_storage saddr;
        int addr_len;
        struct socket *sock;

        if (con->nodeid == 0) {
                log_print("attempt to connect sock 0 foiled");
                return;
        }

        mutex_lock(&con->sock_mutex);
        if (con->retries++ > MAX_CONNECT_RETRIES)
                goto out;

        /* Some odd races can cause double-connects, ignore them */
        if (con->sock) {
                result = 0;
                goto out;
        }

        /* Create a socket to communicate with */
        result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &sock);
        if (result < 0)
                goto out_err;

        memset(&saddr, 0, sizeof(saddr));
        if (dlm_nodeid_to_addr(con->nodeid, &saddr))
                goto out_err;

        sock->sk->sk_user_data = con;
        con->rx_action = receive_from_sock;

        make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

        add_sock(sock, con);

        log_print("connecting to %d", con->nodeid);
        result =
                sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
                                   O_NONBLOCK);
        if (result == -EINPROGRESS)
                result = 0;
        if (result == 0)
                goto out;

out_err:
        if (con->sock) {
                sock_release(con->sock);
                con->sock = NULL;
        }
        /*
         * Some errors are fatal and this list might need adjusting. For other
         * errors we try again until the max number of retries is reached.
         */
        if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
            result != -ENETDOWN && result != -EINVAL &&
            result != -EPROTONOSUPPORT) {
                lowcomms_connect_sock(con);
                result = 0;
        }
out:
        mutex_unlock(&con->sock_mutex);
        return;
}
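
/*
 * Create, bind and activate the listening socket for connection 0.
 * SO_REUSEADDR is set so that a restarted daemon can rebind the port
 * straight away, and SO_KEEPALIVE so that dead peers are eventually
 * noticed; failure of either setsockopt is logged but not fatal.
 */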
static struct socket *create_listen_sock(struct connection *con,
                                         struct sockaddr_storage *saddr)
{
        struct socket *sock = NULL;
        mm_segment_t fs;
        int result = 0;
        int one = 1;
        int addr_len;

        if (dlm_local_addr.ss_family == AF_INET)
                addr_len = sizeof(struct sockaddr_in);
        else
                addr_len = sizeof(struct sockaddr_in6);

        /* Create a socket to communicate with */
        result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &sock);
        if (result < 0) {
                printk("dlm: Can't create listening comms socket\n");
                goto create_out;
        }

        fs = get_fs();
        set_fs(get_ds());
        result = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                                 (char *)&one, sizeof(one));
        set_fs(fs);
        if (result < 0) {
                printk("dlm: Failed to set SO_REUSEADDR on socket: result=%d\n",
                       result);
        }
        sock->sk->sk_user_data = con;
        con->rx_action = accept_from_sock;
        con->sock = sock;

        /* Bind to our port */
        make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
        result = sock->ops->bind(sock, (struct sockaddr *)saddr, addr_len);
        if (result < 0) {
                printk("dlm: Can't bind to port %d\n", dlm_config.ci_tcp_port);
                sock_release(sock);
                sock = NULL;
                con->sock = NULL;
                goto create_out;
        }

        fs = get_fs();
        set_fs(get_ds());

        result = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
                                 (char *)&one, sizeof(one));
        set_fs(fs);
        if (result < 0) {
                printk("dlm: Set keepalive failed: %d\n", result);
        }

        result = sock->ops->listen(sock, 5);
        if (result < 0) {
                printk("dlm: Can't listen on port %d\n",
                       dlm_config.ci_tcp_port);
                sock_release(sock);
                sock = NULL;
                goto create_out;
        }

create_out:
        return sock;
}
/* Listen on all interfaces */
static int listen_for_all(void)
{
        struct socket *sock = NULL;
        struct connection *con = nodeid2con(0, GFP_KERNEL);
        int result = -EINVAL;

        if (!con)
                return -ENOMEM;

        /* We don't support multi-homed hosts */
        set_bit(CF_IS_OTHERCON, &con->flags);

        sock = create_listen_sock(con, &dlm_local_addr);
        if (sock) {
                add_sock(sock, con);
                result = 0;
        }
        else {
                result = -EADDRINUSE;
        }

        return result;
}
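
/*
 * Allocate a writequeue entry backed by a fresh page. Callers fill the
 * page via dlm_lowcomms_get_buffer() below and the send worker pushes
 * it out with ->sendpage(), so no extra copy is needed on transmit.
 */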
static struct writequeue_entry *new_writequeue_entry(struct connection *con,
                                                     gfp_t allocation)
{
        struct writequeue_entry *entry;

        entry = kmalloc(sizeof(struct writequeue_entry), allocation);
        if (!entry)
                return NULL;

        entry->page = alloc_page(allocation);
        if (!entry->page) {
                kfree(entry);
                return NULL;
        }

        entry->offset = 0;
        entry->len = 0;
        entry->end = 0;
        entry->users = 0;
        entry->con = con;

        return entry;
}
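
/*
 * First half of the two-phase send API: reserve "len" bytes in the last
 * writequeue entry for this node (or a new entry if it doesn't fit),
 * returning the entry as an opaque handle and the mapped data pointer
 * through ppc. The caller writes the message and must then call
 * dlm_lowcomms_commit_buffer() to make it eligible for sending.
 *
 * A minimal usage sketch (the memcpy of a prebuilt message is
 * illustrative only; real callers build the midcomms format in place):
 *
 *      char *p;
 *      void *mh = dlm_lowcomms_get_buffer(nodeid, msglen, GFP_KERNEL, &p);
 *      if (!mh)
 *              return -ENOBUFS;
 *      memcpy(p, msg, msglen);         // fill the reserved bytes
 *      dlm_lowcomms_commit_buffer(mh); // queue it for the send worker
 */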
void *dlm_lowcomms_get_buffer(int nodeid, int len,
                              gfp_t allocation, char **ppc)
{
        struct connection *con;
        struct writequeue_entry *e;
        int offset = 0;
        int users = 0;

        con = nodeid2con(nodeid, allocation);
        if (!con)
                return NULL;

        spin_lock(&con->writequeue_lock);
        e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
        if ((&e->list == &con->writequeue) ||
            (PAGE_CACHE_SIZE - e->end < len)) {
                e = NULL;
        } else {
                offset = e->end;
                e->end += len;
                users = e->users++;
        }
        spin_unlock(&con->writequeue_lock);

        if (e) {
        got_one:
                if (users == 0)
                        kmap(e->page);
                *ppc = page_address(e->page) + offset;
                return e;
        }

        e = new_writequeue_entry(con, allocation);
        if (e) {
                spin_lock(&con->writequeue_lock);
                offset = e->end;
                e->end += len;
                users = e->users++;
                list_add_tail(&e->list, &con->writequeue);
                spin_unlock(&con->writequeue_lock);
                goto got_one;
        }
        return NULL;
}
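
/*
 * Second half of the send API: drop our reservation on the entry and,
 * once the last user has committed, unmap the page and kick the send
 * worker for this connection.
 */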
void dlm_lowcomms_commit_buffer(void *mh)
{
        struct writequeue_entry *e = (struct writequeue_entry *)mh;
        struct connection *con = e->con;
        int users;

        spin_lock(&con->writequeue_lock);
        users = --e->users;
        if (users)
                goto out;
        e->len = e->end - e->offset;
        kunmap(e->page);
        spin_unlock(&con->writequeue_lock);

        if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
                queue_work(send_workqueue, &con->swork);
        }
        return;

out:
        spin_unlock(&con->writequeue_lock);
        return;
}

static void free_entry(struct writequeue_entry *e)
{
        __free_page(e->page);
        kfree(e);
}
/* Send a message */
static void send_to_sock(struct connection *con)
{
        int ret = 0;
        ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
        const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        struct writequeue_entry *e;
        int len, offset;

        mutex_lock(&con->sock_mutex);
        if (con->sock == NULL)
                goto out_connect;

        sendpage = con->sock->ops->sendpage;

        spin_lock(&con->writequeue_lock);
        for (;;) {
                e = list_entry(con->writequeue.next, struct writequeue_entry,
                               list);
                if ((struct list_head *) e == &con->writequeue)
                        break;

                len = e->len;
                offset = e->offset;
                BUG_ON(len == 0 && e->users == 0);
                spin_unlock(&con->writequeue_lock);
                kmap(e->page);

                ret = 0;
                if (len) {
                        ret = sendpage(con->sock, e->page, offset, len,
                                       msg_flags);
                        if (ret == -EAGAIN || ret == 0)
                                goto out;
                        if (ret <= 0)
                                goto send_error;
                }
                else {
                        /* Don't starve people filling buffers */
                        cond_resched();
                }

                spin_lock(&con->writequeue_lock);
                e->offset += ret;
                e->len -= ret;

                if (e->len == 0 && e->users == 0) {
                        list_del(&e->list);
                        kunmap(e->page);
                        free_entry(e);
                        continue;
                }
        }
        spin_unlock(&con->writequeue_lock);
out:
        mutex_unlock(&con->sock_mutex);
        return;

send_error:
        mutex_unlock(&con->sock_mutex);
        close_connection(con, false);
        lowcomms_connect_sock(con);
        return;

out_connect:
        mutex_unlock(&con->sock_mutex);
        connect_to_sock(con);
        return;
}
static void clean_one_writequeue(struct connection *con)
{
        struct list_head *list;
        struct list_head *temp;

        spin_lock(&con->writequeue_lock);
        list_for_each_safe(list, temp, &con->writequeue) {
                struct writequeue_entry *e =
                        list_entry(list, struct writequeue_entry, list);
                list_del(&e->list);
                free_entry(e);
        }
        spin_unlock(&con->writequeue_lock);
}
/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
        struct connection *con;

        if (!connections)
                goto out;

        log_print("closing connection to node %d", nodeid);
        con = nodeid2con(nodeid, 0);
        if (con) {
                clean_one_writequeue(con);
                close_connection(con, true);
        }
        return 0;

out:
        return -1;
}
/* Look for activity on active sockets */
static void process_recv_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, rwork);
        int err;

        clear_bit(CF_READ_PENDING, &con->flags);
        do {
                err = con->rx_action(con);
        } while (!err);
}

static void process_send_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, swork);

        if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
                connect_to_sock(con);
        }

        clear_bit(CF_WRITE_PENDING, &con->flags);
        send_to_sock(con);
}
/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
        int nodeid;

        for (nodeid = 1; nodeid < conn_array_size; nodeid++) {
                struct connection *con = nodeid2con(nodeid, 0);

                if (con)
                        clean_one_writequeue(con);
        }
}

static void work_stop(void)
{
        destroy_workqueue(recv_workqueue);
        destroy_workqueue(send_workqueue);
}
static int work_start(void)
{
        /* create_workqueue() returns NULL on failure, not an ERR_PTR,
           so check for NULL rather than using IS_ERR() */
        recv_workqueue = create_workqueue("dlm_recv");
        if (!recv_workqueue) {
                log_print("can't start dlm_recv");
                return -ENOMEM;
        }

        send_workqueue = create_singlethread_workqueue("dlm_send");
        if (!send_workqueue) {
                log_print("can't start dlm_send");
                destroy_workqueue(recv_workqueue);
                return -ENOMEM;
        }

        return 0;
}
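
/*
 * Module shutdown: set every connection's flag bits so no new work is
 * queued, stop the workqueues, then free all queued writes, all
 * connections and the connection cache.
 */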
void dlm_lowcomms_stop(void)
{
        int i;

        /* Set all the flags to prevent any
           socket activity.
        */
        for (i = 0; i < conn_array_size; i++) {
                if (connections[i])
                        connections[i]->flags |= 0xFF;
        }

        work_stop();
        clean_writequeues();

        for (i = 0; i < conn_array_size; i++) {
                if (connections[i]) {
                        close_connection(connections[i], true);
                        if (connections[i]->othercon)
                                kmem_cache_free(con_cache,
                                                connections[i]->othercon);
                        kmem_cache_free(con_cache, connections[i]);
                }
        }

        kfree(connections);
        connections = NULL;

        kmem_cache_destroy(con_cache);
}
/* This is quite likely to sleep... */
int dlm_lowcomms_start(void)
{
        int error = 0;

        error = -ENOMEM;
        connections = kzalloc(sizeof(struct connection *) *
                              NODE_INCREMENT, GFP_KERNEL);
        if (!connections)
                goto out;

        conn_array_size = NODE_INCREMENT;

        if (dlm_our_addr(&dlm_local_addr, 0)) {
                log_print("no local IP address has been set");
                goto fail_free_conn;
        }
        if (!dlm_our_addr(&dlm_local_addr, 1)) {
                log_print("This dlm comms module does not support multi-homed clustering");
                goto fail_free_conn;
        }

        con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
                                      __alignof__(struct connection), 0,
                                      NULL, NULL);
        if (!con_cache)
                goto fail_free_conn;

        /* Start listening */
        error = listen_for_all();
        if (error)
                goto fail_unlisten;

        error = work_start();
        if (error)
                goto fail_unlisten;

        return 0;

fail_unlisten:
        close_connection(connections[0], false);
        kmem_cache_free(con_cache, connections[0]);
        kmem_cache_destroy(con_cache);

fail_free_conn:
        kfree(connections);

out:
        return error;
}
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */