dm-log-userspace-transfer.c

/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/workqueue.h>
#include <linux/connector.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>

#include "dm-log-userspace-transfer.h"

static uint32_t dm_ulog_seq;

/*
 * Netlink/Connector is an unreliable protocol.  How long should
 * we wait for a response before assuming it was lost and retrying?
 * (If we do receive a response after this time, it will be discarded
 * and the response to the resent request will be waited for.)
 */
#define DM_ULOG_RETRY_TIMEOUT (15 * HZ)
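
/*
 * The 'resend' label in dm_consult_userspace() below re-issues the request
 * (with a fresh sequence number) each time this timeout expires, and again
 * whenever userspace answers -EAGAIN.
 */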
/*
 * Pre-allocated space for speed
 */
#define DM_ULOG_PREALLOCED_SIZE 512

static struct cn_msg *prealloced_cn_msg;
static struct dm_ulog_request *prealloced_ulog_tfr;
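
/*
 * Both pointers refer to a single DM_ULOG_PREALLOCED_SIZE allocation made
 * in dm_ulog_tfr_init(): the 'struct cn_msg' header sits at the start of
 * the buffer and the 'struct dm_ulog_request' (plus its payload) follows
 * immediately after it.
 */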
static struct cb_id ulog_cn_id = {
	.idx = CN_IDX_DM,
	.val = CN_VAL_DM_USERSPACE_LOG
};

static DEFINE_MUTEX(dm_ulog_lock);

struct receiving_pkg {
	struct list_head list;
	struct completion complete;

	uint32_t seq;

	int error;
	size_t *data_size;
	char *data;
};

static DEFINE_SPINLOCK(receiving_list_lock);
static struct list_head receiving_list;

static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
{
	int r;
	struct cn_msg *msg = prealloced_cn_msg;

	memset(msg, 0, sizeof(struct cn_msg));

	msg->id.idx = ulog_cn_id.idx;
	msg->id.val = ulog_cn_id.val;
	msg->ack = 0;
	msg->seq = tfr->seq;
	msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;

	r = cn_netlink_send(msg, 0, 0, gfp_any());

	return r;
}

/*
 * Parameters for this function can be either msg or tfr, but not
 * both.  This function fills in the reply for a waiting request.
 * If just msg is given, then the reply is simply an ACK from userspace
 * that the request was received.
 *
 * Returns: 0 on success, -ENOENT on failure
 */
static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
{
	uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0;
	struct receiving_pkg *pkg;

	/*
	 * The 'receiving_pkg' entries in this list are statically
	 * allocated on the stack in 'dm_consult_userspace'.
	 * Each process that is waiting for a reply from the user
	 * space server will have an entry in this list.
	 *
	 * We are safe to do it this way because the stack space
	 * is unique to each process, but still addressable by
	 * other processes.
	 */
	list_for_each_entry(pkg, &receiving_list, list) {
		if (rtn_seq != pkg->seq)
			continue;

		if (msg) {
			pkg->error = -msg->ack;
			/*
			 * If we are trying again, we will need to know our
			 * storage capacity.  Otherwise, along with the
			 * error code, we make explicit that we have no data.
			 */
			if (pkg->error != -EAGAIN)
				*(pkg->data_size) = 0;
		} else if (tfr->data_size > *(pkg->data_size)) {
			DMERR("Insufficient space to receive package [%u] "
			      "(%u vs %zu)", tfr->request_type,
			      tfr->data_size, *(pkg->data_size));

			*(pkg->data_size) = 0;
			pkg->error = -ENOSPC;
		} else {
			pkg->error = tfr->error;
			memcpy(pkg->data, tfr->data, tfr->data_size);
			*(pkg->data_size) = tfr->data_size;
		}
		complete(&pkg->complete);
		return 0;
	}

	return -ENOENT;
}

/*
 * This is the connector callback that delivers data
 * that was sent from userspace.
 */
static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);

	if (!capable(CAP_SYS_ADMIN))
		return;

	spin_lock(&receiving_list_lock);
	if (msg->len == 0)
		fill_pkg(msg, NULL);
	else if (msg->len < sizeof(*tfr))
		DMERR("Incomplete message received (expected %u, got %u): [%u]",
		      (unsigned)sizeof(*tfr), msg->len, msg->seq);
	else
		fill_pkg(NULL, tfr);
	spin_unlock(&receiving_list_lock);
}
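
/*
 * Replies from userspace arrive in the same layout the kernel sends them:
 * a 'struct cn_msg' header immediately followed by a 'struct dm_ulog_request'
 * (hence the 'msg + 1' cast above).  A message with an empty payload is
 * treated as a bare ACK/NAK, with the status taken from msg->ack in
 * fill_pkg().
 */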
/**
 * dm_consult_userspace
 * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
 * @luid: log's local unique identifier
 * @request_type: found in include/linux/dm-log-userspace.h
 * @data: data to tx to the server
 * @data_size: size of data in bytes
 * @rdata: place to put return data from server
 * @rdata_size: value-result (amount of space given/amount of space used)
 *
 * rdata_size is undefined on failure.
 *
 * Memory used to communicate with userspace is zero'ed
 * before populating to ensure that no unwanted bits leak
 * from kernel space to user-space.  All userspace log communications
 * between kernel and user space go through this function.
 *
 * Returns: 0 on success, -EXXX on failure
 **/
int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
			 char *data, size_t data_size,
			 char *rdata, size_t *rdata_size)
{
	int r = 0;
	unsigned long tmo;
	size_t dummy = 0;
	int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
	struct dm_ulog_request *tfr = prealloced_ulog_tfr;
	struct receiving_pkg pkg;

	/*
	 * Given the space needed to hold the 'struct cn_msg' and
	 * 'struct dm_ulog_request' - do we have enough payload
	 * space remaining?
	 */
	if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
		DMINFO("Size of tfr exceeds preallocated size");
		return -EINVAL;
	}

	if (!rdata_size)
		rdata_size = &dummy;
resend:
	/*
	 * We serialize the sending of requests so we can
	 * use the preallocated space.
	 */
	mutex_lock(&dm_ulog_lock);

	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
	tfr->version = DM_ULOG_REQUEST_VERSION;
	tfr->luid = luid;
	tfr->seq = dm_ulog_seq++;

	/*
	 * Must be valid request type (all other bits set to
	 * zero).  This reserves other bits for possible future
	 * use.
	 */
	tfr->request_type = request_type & DM_ULOG_REQUEST_MASK;
	tfr->data_size = data_size;
	if (data && data_size)
		memcpy(tfr->data, data, data_size);

	memset(&pkg, 0, sizeof(pkg));
	init_completion(&pkg.complete);
	pkg.seq = tfr->seq;
	pkg.data_size = rdata_size;
	pkg.data = rdata;
	spin_lock(&receiving_list_lock);
	list_add(&(pkg.list), &receiving_list);
	spin_unlock(&receiving_list_lock);

	r = dm_ulog_sendto_server(tfr);

	mutex_unlock(&dm_ulog_lock);

	if (r) {
		DMERR("Unable to send log request [%u] to userspace: %d",
		      request_type, r);
		spin_lock(&receiving_list_lock);
		list_del_init(&(pkg.list));
		spin_unlock(&receiving_list_lock);

		goto out;
	}

	tmo = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
	spin_lock(&receiving_list_lock);
	list_del_init(&(pkg.list));
	spin_unlock(&receiving_list_lock);

	if (!tmo) {
		DMWARN("[%s] Request timed out: [%u/%u] - retrying",
		       (strlen(uuid) > 8) ?
		       (uuid + (strlen(uuid) - 8)) : (uuid),
		       request_type, pkg.seq);
		goto resend;
	}

	r = pkg.error;
	if (r == -EAGAIN)
		goto resend;

out:
	return r;
}
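
/*
 * Illustrative example (not part of this file): a caller such as the
 * userspace dirty-log module might issue a constructor request roughly as
 * sketched below.  'uuid', 'luid', 'ctr_str', 'str_size' and 'reply' stand
 * in for caller state; DM_ULOG_CTR is one of the request types from
 * include/linux/dm-log-userspace.h.
 *
 *	size_t rdata_size = sizeof(reply);
 *
 *	r = dm_consult_userspace(uuid, luid, DM_ULOG_CTR,
 *				 ctr_str, str_size,
 *				 reply, &rdata_size);
 *	if (!r)
 *		DMINFO("userspace log answered with %zu bytes", rdata_size);
 */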
int dm_ulog_tfr_init(void)
{
	int r;
	void *prealloced;

	INIT_LIST_HEAD(&receiving_list);

	prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL);
	if (!prealloced)
		return -ENOMEM;

	prealloced_cn_msg = prealloced;
	prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg);

	r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
	if (r) {
		kfree(prealloced_cn_msg);
		return r;
	}

	return 0;
}

void dm_ulog_tfr_exit(void)
{
	cn_del_callback(&ulog_cn_id);
	kfree(prealloced_cn_msg);
}