requestqueue.c

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"
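
/*
 * Each message saved during recovery is wrapped in an rq_entry.  The
 * dlm_message must stay the last member: the entry is allocated with
 * extra space so the variable-length message body can be copied in
 * directly behind it.
 */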
struct rq_entry {
	struct list_head list;
	uint32_t recover_seq;
	int nodeid;
	struct dlm_message request;
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
{
	struct rq_entry *e;
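
	/* h_length is the total on-wire message size, struct dlm_message
	   plus a variable-length body, so length is the size of that
	   extra body */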
	int length = ms->m_header.h_length - sizeof(struct dlm_message);

	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory len %d", length);
		return;
	}
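
	/* record the recovery sequence in effect when the message
	   arrived; it is passed to dlm_receive_message_saved() when the
	   queue is drained so stale messages can be recognized */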
	e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
	e->nodeid = nodeid;
	memcpy(&e->request, ms, ms->m_header.h_length);

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

/*
 * Called by dlm_recoverd to process normal messages saved while recovery was
 * happening.  Normal locking has been enabled before this is called.  dlm_recv,
 * upon receiving a message, will wait for all saved messages to be drained
 * here before processing the message it got.  If a new dlm_ls_stop() arrives
 * while we're processing these saved messages, it may block trying to suspend
 * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue.  In that
 * case, we don't abort since locking_stopped is still 0.  If dlm_recv is not
 * waiting for us, then this processing may be aborted due to locking_stopped.
 */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_message *ms;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		mutex_unlock(&ls->ls_requestqueue_mutex);
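
		/* the entry stays on the list while its message is
		   processed without the mutex held; it is only removed
		   (and freed) after processing completes */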
		ms = &e->request;

		log_limit(ls, "dlm_process_requestqueue msg %d from %d "
			  "lkid %x remid %x result %d seq %u",
			  ms->m_type, ms->m_header.h_nodeid,
			  ms->m_lkid, ms->m_remid, ms->m_result,
			  e->recover_seq);

		dlm_receive_message_saved(ls, &e->request, e->recover_seq);

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recv.  At
 * the same time, dlm_recv will start receiving new requests from remote nodes.
 * We want to delay dlm_recv processing new requests until dlm_recoverd has
 * finished processing the old saved requests.  We don't check for locking
 * stopped here because dlm_ls_stop won't stop locking until it's suspended us
 * (dlm_recv).
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	for (;;) {
		mutex_lock(&ls->ls_requestqueue_mutex);
		if (list_empty(&ls->ls_requestqueue))
			break;
		mutex_unlock(&ls->ls_requestqueue_mutex);
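		/* dlm_recoverd is still draining the queue; yield the
		   cpu and check again */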
		schedule();
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	uint32_t type = ms->m_type;

	/* the ls is being cleaned up and freed by release_lockspace */
	if (!ls->ls_count)
		return 1;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */
	if (type == DLM_MSG_REMOVE ||
	    type == DLM_MSG_LOOKUP ||
	    type == DLM_MSG_LOOKUP_REPLY)
		return 1;
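
	/* in nodir mode, all remaining messages are purged as well:
	   resource masters can change during recovery, leaving saved
	   messages stale */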
	if (!dlm_no_directory(ls))
		return 0;

	return 1;
}

void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = &e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}