xpc_channel.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	lockdep_assert_held(&ch->lock);

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_arch_ops.setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
		else
			ch->flags |= XPC_C_SETUP;

		if (ch->flags & XPC_C_DISCONNECTING)
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
		ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
		xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENCOMPLETE))
		return;

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */
}
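
/*
 * Illustrative walkthrough, not part of the original file: the open
 * handshake that xpc_process_connect() advances each time it is called.
 * Both sides run the same state machine:
 *
 *	1. each side sends an OPENREQUEST; receiving the remote's sets
 *	   XPC_C_ROPENREQUEST
 *	2. with OPENREQUEST and ROPENREQUEST both set (and the message
 *	   structures set up, XPC_C_SETUP), each side sends an OPENREPLY
 *	3. once the remote's OPENREPLY is seen (XPC_C_ROPENREPLY), each side
 *	   sends an OPENCOMPLETE and marks itself XPC_C_CONNECTED
 *	4. seeing the remote's OPENCOMPLETE (XPC_C_ROPENCOMPLETE) finishes
 *	   the handshake; all transient flags are cleared, leaving only
 *	   XPC_C_CONNECTED | XPC_C_SETUP
 */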

/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	lockdep_assert_held(&ch->lock);

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_arch_ops.partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_arch_ops.notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_arch_ops.teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to
	 * xpc_arch_ops.teardown_msg_structures()) but not including
	 * XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
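
/*
 * Illustrative walkthrough, not part of the original file: the close
 * handshake mirrors the open one.  Either side starts it by setting
 * XPC_C_CLOSEREQUEST (see xpc_disconnect_channel()) and sending a
 * CLOSEREQUEST:
 *
 *	1. receiving the remote's CLOSEREQUEST sets XPC_C_RCLOSEREQUEST and
 *	   a CLOSEREPLY is sent back
 *	2. once CLOSEREQUEST, RCLOSEREQUEST, CLOSEREPLY and RCLOSEREPLY are
 *	   all in place, the message queues are torn down and the channel is
 *	   marked XPC_C_DISCONNECTED
 *	3. if the remote partition is deactivating, the reply exchange is
 *	   skipped; XPC just waits for the other side to disengage
 */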

/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;
	enum xp_retval ret;
	int create_kthread = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		goto out;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				goto out;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
				 XPC_CHCTL_OPENCOMPLETE);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */
		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			goto out;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			goto out;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			goto out;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			goto out;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      entry_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			goto out;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				goto out;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
						      args->local_msgqueue_pa);
		if (ret != xpSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
			goto out;
		}
		ch->flags |= XPC_C_ROPENREPLY;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST) ||
		    !(ch->flags & XPC_C_OPENREPLY)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));

		ch->flags |= XPC_C_ROPENCOMPLETE;

		xpc_process_connect(ch, &irq_flags);
		create_kthread = 1;
	}

out:
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (create_kthread)
		xpc_create_kthreads(ch, 1, 0);
}

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}

void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
	}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down.  XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}
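
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * registerer reaches xpc_initiate_connect() through xpc_connect(), declared
 * in "xp.h".  MY_CH_NUMBER, MY_MSG_SIZE, MY_NENTRIES and the kthread limits
 * below are made-up values; my_channel_func must have the xpc_channel_func
 * signature from "xp.h".  On an xpConnected callout the data argument
 * carries the channel's local_nentries (see xpc_connected_callout()):
 *
 *	static void
 *	my_channel_func(enum xp_retval reason, short partid, int ch_number,
 *			void *data, void *key)
 *	{
 *		if (reason == xpConnected)
 *			pr_debug("channel %d to partition %d connected, "
 *				 "nentries=%lu\n", ch_number, partid,
 *				 (unsigned long)(u64)data);
 *	}
 *
 *	enum xp_retval ret;
 *
 *	ret = xpc_connect(MY_CH_NUMBER, my_channel_func, NULL, MY_MSG_SIZE,
 *			  MY_NENTRIES, MY_KTHREAD_LIMIT, MY_IDLE_LIMIT);
 *	if (ret != xpSuccess)
 *		return;		// registration failed; no callouts will occur
 */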

void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to teardown all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}
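
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * registerer normally reaches xpc_initiate_disconnect() through
 * xpc_disconnect() from "xp.h"; once the call returns, no further callouts
 * will arrive for that channel.  MY_CH_NUMBER is a made-up name:
 *
 *	xpc_disconnect(MY_CH_NUMBER);
 */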

/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	lockdep_assert_held(&ch->lock);

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;
	DEFINE_WAIT(wait);

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
	ret = schedule_timeout(1);
	finish_wait(&ch->msg_allocate_wq, &wait);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
			  flags, payload, payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}
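
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * fire-and-forget send through xpc_send(), the "xp.h" wrapper that ends up
 * here.  MY_CH_NUMBER and struct my_msg are made-up names:
 *
 *	struct my_msg msg = { .cmd = 1 };
 *	enum xp_retval ret;
 *
 *	ret = xpc_send(partid, MY_CH_NUMBER, XPC_NOWAIT, &msg, sizeof(msg));
 *	if (ret != xpSuccess)
 *		pr_warn("xpc_send to partition %d failed, ret=%d\n",
 *			partid, ret);
 */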

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *	       receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
			  flags, payload, payload_size, XPC_N_CALL, func, key);
		xpc_part_deref(part);
	}
	return ret;
}
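
/*
 * Usage sketch (illustrative only, not part of the original file): a send
 * with delivery notification through xpc_send_notify(), the "xp.h" wrapper
 * for this routine.  The notify callout runs in XPC's context and must not
 * block; my_sent_notify, MY_CH_NUMBER and buf are made-up names:
 *
 *	static void
 *	my_sent_notify(enum xp_retval reason, short partid, int ch_number,
 *		       void *key)
 *	{
 *		kfree(key);	// remote end has the message; free the buffer
 *	}
 *
 *	ret = xpc_send_notify(partid, MY_CH_NUMBER, XPC_WAIT, buf,
 *			      sizeof(*buf), my_sent_notify, buf);
 *	if (ret != xpSuccess)
 *		kfree(buf);	// on error the notify func is NOT called
 */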

/*
 * Deliver a message's payload to its intended recipient.
 */
void
xpc_deliver_payload(struct xpc_channel *ch)
{
	void *payload;

	payload = xpc_arch_ops.get_deliverable_payload(ch);
	if (payload != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number, payload,
				 ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}

/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *		  xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_arch_ops.received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
	xpc_msgqueue_deref(ch);
}
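
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * receive side as seen by a registerer.  xpc_deliver_payload() invokes the
 * channel function registered via xpc_connect() with reason xpMsgReceived;
 * the registerer acknowledges the payload with xpc_received(), the "xp.h"
 * wrapper for this routine.  process_my_payload is a made-up name:
 *
 *	static void
 *	my_channel_func(enum xp_retval reason, short partid, int ch_number,
 *			void *data, void *key)
 *	{
 *		switch (reason) {
 *		case xpMsgReceived:
 *			process_my_payload(data);
 *			xpc_received(partid, ch_number, data);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */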