operation.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
                                      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        unsigned long flags;

        spin_lock_irqsave(&connection->lock, flags);
        switch (connection->state) {
        case GB_CONNECTION_STATE_ENABLED:
                break;
        case GB_CONNECTION_STATE_ENABLED_TX:
                if (gb_operation_is_incoming(operation))
                        goto err_unlock;
                break;
        case GB_CONNECTION_STATE_DISCONNECTING:
                if (!gb_operation_is_core(operation))
                        goto err_unlock;
                break;
        default:
                goto err_unlock;
        }

        if (operation->active++ == 0)
                list_add_tail(&operation->links, &connection->operations);

        trace_gb_operation_get_active(operation);

        spin_unlock_irqrestore(&connection->lock, flags);

        return 0;

err_unlock:
        spin_unlock_irqrestore(&connection->lock, flags);

        return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        unsigned long flags;

        spin_lock_irqsave(&connection->lock, flags);

        trace_gb_operation_put_active(operation);

        if (--operation->active == 0) {
                list_del(&operation->links);
                if (atomic_read(&operation->waiters))
                        wake_up(&gb_operation_cancellation_queue);
        }

        spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&connection->lock, flags);
        ret = operation->active;
        spin_unlock_irqrestore(&connection->lock, flags);

        return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks." That is, if two concurrent threads
 * race to set the result, the first one wins. The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong. It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead. Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
        unsigned long flags;
        int prev;

        if (result == -EINPROGRESS) {
                /*
                 * -EINPROGRESS is used to indicate the request is
                 * in flight. It should be the first result value
                 * set after the initial -EBADR. Issue a warning
                 * and record an implementation error if it's
                 * set at any other time.
                 */
                spin_lock_irqsave(&gb_operations_lock, flags);
                prev = operation->errno;
                if (prev == -EBADR)
                        operation->errno = result;
                else
                        operation->errno = -EILSEQ;
                spin_unlock_irqrestore(&gb_operations_lock, flags);
                WARN_ON(prev != -EBADR);

                return true;
        }

        /*
         * The first result value set after a request has been sent
         * will be the final result of the operation. Subsequent
         * attempts to set the result are ignored.
         *
         * Note that -EBADR is a reserved "initial state" result
         * value. Attempts to set this value result in a warning,
         * and the result code is set to -EILSEQ instead.
         */
        if (WARN_ON(result == -EBADR))
                result = -EILSEQ;       /* Nobody should be setting -EBADR */

        spin_lock_irqsave(&gb_operations_lock, flags);
        prev = operation->errno;
        if (prev == -EINPROGRESS)
                operation->errno = result;      /* First and final result */
        spin_unlock_irqrestore(&gb_operations_lock, flags);

        return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
        int result = operation->errno;

        WARN_ON(result == -EBADR);
        WARN_ON(result == -EINPROGRESS);

        return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
        struct gb_operation *operation;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&connection->lock, flags);
        list_for_each_entry(operation, &connection->operations, links)
                if (operation->id == operation_id &&
                    !gb_operation_is_incoming(operation)) {
                        gb_operation_get(operation);
                        found = true;
                        break;
                }
        spin_unlock_irqrestore(&connection->lock, flags);

        return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
        struct gb_connection *connection = message->operation->connection;

        trace_gb_message_send(message);

        return connection->hd->driver->message_send(connection->hd,
                                                    connection->hd_cport_id,
                                                    message,
                                                    gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
        struct gb_host_device *hd = message->operation->connection->hd;

        hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        int status;
        int ret;

        if (connection->handler) {
                status = connection->handler(operation);
        } else {
                dev_err(&connection->hd->dev,
                        "%s: unexpected incoming request of type 0x%02x\n",
                        connection->name, operation->type);

                status = -EPROTONOSUPPORT;
        }

        ret = gb_operation_response_send(operation, status);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to send response %d for type 0x%02x: %d\n",
                        connection->name, status, operation->type, ret);
                return;
        }
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this. The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
        struct gb_operation *operation;
        int ret;

        operation = container_of(work, struct gb_operation, work);

        if (gb_operation_is_incoming(operation)) {
                gb_operation_request_handle(operation);
        } else {
                ret = del_timer_sync(&operation->timer);
                if (!ret) {
                        /* Cancel request message if scheduled by timeout. */
                        if (gb_operation_result(operation) == -ETIMEDOUT)
                                gb_message_cancel(operation->request);
                }

                operation->callback(operation);
        }

        gb_operation_put_active(operation);
        gb_operation_put(operation);
}

static void gb_operation_timeout(struct timer_list *t)
{
        struct gb_operation *operation = from_timer(operation, t, timer);

        if (gb_operation_result_set(operation, -ETIMEDOUT)) {
                /*
                 * A stuck request message will be cancelled from the
                 * workqueue.
                 */
                queue_work(gb_operation_completion_wq, &operation->work);
        }
}

static void gb_operation_message_init(struct gb_host_device *hd,
                                      struct gb_message *message,
                                      u16 operation_id,
                                      size_t payload_size, u8 type)
{
        struct gb_operation_msg_hdr *header;

        header = message->buffer;

        message->header = header;
        message->payload = payload_size ? header + 1 : NULL;
        message->payload_size = payload_size;

        /*
         * The type supplied for incoming message buffers will be
         * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
         * arriving data so there's no need to initialize the message header.
         */
        if (type != GB_REQUEST_TYPE_INVALID) {
                u16 message_size = (u16)(sizeof(*header) + payload_size);

                /*
                 * For a request, the operation id gets filled in
                 * when the message is sent. For a response, it
                 * will be copied from the request by the caller.
                 *
                 * The result field in a request message must be
                 * zero. It will be set just prior to sending for
                 * a response.
                 */
                header->size = cpu_to_le16(message_size);
                header->operation_id = 0;
                header->type = type;
                header->result = 0;
        }
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header. The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation. The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *      message header  \_ these combined are
 *      message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
                           size_t payload_size, gfp_t gfp_flags)
{
        struct gb_message *message;
        struct gb_operation_msg_hdr *header;
        size_t message_size = payload_size + sizeof(*header);

        if (message_size > hd->buffer_size_max) {
                dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
                         message_size, hd->buffer_size_max);
                return NULL;
        }

        /* Allocate the message structure and buffer. */
        message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
        if (!message)
                return NULL;

        message->buffer = kzalloc(message_size, gfp_flags);
        if (!message->buffer)
                goto err_free_message;

        /* Initialize the message. Operation id is filled in later. */
        gb_operation_message_init(hd, message, 0, payload_size, type);

        return message;

err_free_message:
        kmem_cache_free(gb_message_cache, message);

        return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
        kfree(message->buffer);
        kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
        switch (status) {
        case GB_OP_SUCCESS:
                return 0;
        case GB_OP_INTERRUPTED:
                return -EINTR;
        case GB_OP_TIMEOUT:
                return -ETIMEDOUT;
        case GB_OP_NO_MEMORY:
                return -ENOMEM;
        case GB_OP_PROTOCOL_BAD:
                return -EPROTONOSUPPORT;
        case GB_OP_OVERFLOW:
                return -EMSGSIZE;
        case GB_OP_INVALID:
                return -EINVAL;
        case GB_OP_RETRY:
                return -EAGAIN;
        case GB_OP_NONEXISTENT:
                return -ENODEV;
        case GB_OP_MALFUNCTION:
                return -EILSEQ;
        case GB_OP_UNKNOWN_ERROR:
        default:
                return -EIO;
        }
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire. Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
        switch (errno) {
        case 0:
                return GB_OP_SUCCESS;
        case -EINTR:
                return GB_OP_INTERRUPTED;
        case -ETIMEDOUT:
                return GB_OP_TIMEOUT;
        case -ENOMEM:
                return GB_OP_NO_MEMORY;
        case -EPROTONOSUPPORT:
                return GB_OP_PROTOCOL_BAD;
        case -EMSGSIZE:
                return GB_OP_OVERFLOW;  /* Could be underflow too */
        case -EINVAL:
                return GB_OP_INVALID;
        case -EAGAIN:
                return GB_OP_RETRY;
        case -EILSEQ:
                return GB_OP_MALFUNCTION;
        case -ENODEV:
                return GB_OP_NONEXISTENT;
        case -EIO:
        default:
                return GB_OP_UNKNOWN_ERROR;
        }
}

bool gb_operation_response_alloc(struct gb_operation *operation,
                                 size_t response_size, gfp_t gfp)
{
        struct gb_host_device *hd = operation->connection->hd;
        struct gb_operation_msg_hdr *request_header;
        struct gb_message *response;
        u8 type;

        type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
        response = gb_operation_message_alloc(hd, type, response_size, gfp);
        if (!response)
                return false;
        response->operation = operation;

        /*
         * Size and type get initialized when the message is
         * allocated. The errno will be set before sending. All
         * that's left is the operation id, which we copy from the
         * request message header (as-is, in little-endian order).
         */
        request_header = operation->request->header;
        response->header->operation_id = request_header->operation_id;
        operation->response = response;

        return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
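
/*
 * Illustrative sketch (not part of the original file): a protocol's incoming
 * request handler would typically allocate its response with
 * gb_operation_response_alloc() and fill in the payload before returning a
 * status; the core then sends the response via gb_operation_response_send().
 * The handler name and the "example_rsp" payload layout below are
 * hypothetical.
 *
 *      static int example_request_handler(struct gb_operation *op)
 *      {
 *              struct example_rsp *rsp;
 *
 *              if (!gb_operation_response_alloc(op, sizeof(*rsp), GFP_KERNEL))
 *                      return -ENOMEM;
 *
 *              rsp = op->response->payload;
 *              rsp->value = cpu_to_le32(42);
 *
 *              return 0;       // status passed to gb_operation_response_send()
 *      }
 */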

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data. The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC. In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header. Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol. So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
                           size_t request_size, size_t response_size,
                           unsigned long op_flags, gfp_t gfp_flags)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_operation *operation;

        operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
        if (!operation)
                return NULL;
        operation->connection = connection;

        operation->request = gb_operation_message_alloc(hd, type, request_size,
                                                        gfp_flags);
        if (!operation->request)
                goto err_cache;
        operation->request->operation = operation;

        /* Allocate the response buffer for outgoing operations */
        if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
                if (!gb_operation_response_alloc(operation, response_size,
                                                 gfp_flags)) {
                        goto err_request;
                }

                timer_setup(&operation->timer, gb_operation_timeout, 0);
        }

        operation->flags = op_flags;
        operation->type = type;
        operation->errno = -EBADR;      /* Initial value--means "never set" */

        INIT_WORK(&operation->work, gb_operation_work);
        init_completion(&operation->completion);
        kref_init(&operation->kref);
        atomic_set(&operation->waiters, 0);

        return operation;

err_request:
        gb_operation_message_free(operation->request);
err_cache:
        kmem_cache_free(gb_operation_cache, operation);

        return NULL;
}

/*
 * Create a new operation associated with the given connection. The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only. Both of
 * these are allowed to be 0. Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
                          u8 type, size_t request_size,
                          size_t response_size, unsigned long flags,
                          gfp_t gfp)
{
        struct gb_operation *operation;

        if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
                return NULL;
        if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
                type &= ~GB_MESSAGE_TYPE_RESPONSE;

        if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
                flags &= GB_OPERATION_FLAG_USER_MASK;

        operation = gb_operation_create_common(connection, type,
                                               request_size, response_size,
                                               flags, gfp);
        if (operation)
                trace_gb_operation_create(operation);

        return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
                         u8 type, size_t request_size,
                         size_t response_size, unsigned long flags,
                         gfp_t gfp)
{
        struct gb_operation *operation;

        flags |= GB_OPERATION_FLAG_CORE;

        operation = gb_operation_create_common(connection, type,
                                               request_size, response_size,
                                               flags, gfp);
        if (operation)
                trace_gb_operation_create_core(operation);

        return operation;
}

/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
                             u8 type, void *data, size_t size)
{
        struct gb_operation *operation;
        size_t request_size;
        unsigned long flags = GB_OPERATION_FLAG_INCOMING;

        /* Caller has made sure we at least have a message header. */
        request_size = size - sizeof(struct gb_operation_msg_hdr);

        if (!id)
                flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

        operation = gb_operation_create_common(connection, type,
                                               request_size,
                                               GB_REQUEST_TYPE_INVALID,
                                               flags, GFP_ATOMIC);
        if (!operation)
                return NULL;

        operation->id = id;
        memcpy(operation->request->header, data, size);
        trace_gb_operation_create_incoming(operation);

        return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
        kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
        struct gb_operation *operation;

        operation = container_of(kref, struct gb_operation, kref);

        trace_gb_operation_destroy(operation);

        if (operation->response)
                gb_operation_message_free(operation->response);
        gb_operation_message_free(operation->request);

        kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
        if (WARN_ON(!operation))
                return;

        kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
        complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @timeout: operation timeout in milliseconds, or zero for no timeout
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
                              gb_operation_callback callback,
                              unsigned int timeout,
                              gfp_t gfp)
{
        struct gb_connection *connection = operation->connection;
        struct gb_operation_msg_hdr *header;
        unsigned int cycle;
        int ret;

        if (gb_connection_is_offloaded(connection))
                return -EBUSY;

        if (!callback)
                return -EINVAL;

        /*
         * Record the callback function, which is executed in
         * non-atomic (workqueue) context when the final result
         * of an operation has been set.
         */
        operation->callback = callback;

        /*
         * Assign the operation's id, and store it in the request header.
         * Zero is a reserved operation id for unidirectional operations.
         */
        if (gb_operation_is_unidirectional(operation)) {
                operation->id = 0;
        } else {
                cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
                operation->id = (u16)(cycle % U16_MAX + 1);
        }

        header = operation->request->header;
        header->operation_id = cpu_to_le16(operation->id);

        gb_operation_result_set(operation, -EINPROGRESS);

        /*
         * Get an extra reference on the operation. It'll be dropped when the
         * operation completes.
         */
        gb_operation_get(operation);
        ret = gb_operation_get_active(operation);
        if (ret)
                goto err_put;

        ret = gb_message_send(operation->request, gfp);
        if (ret)
                goto err_put_active;

        if (timeout) {
                operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
                add_timer(&operation->timer);
        }

        return 0;

err_put_active:
        gb_operation_put_active(operation);
err_put:
        gb_operation_put(operation);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
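
/*
 * Illustrative sketch (not part of the original file): an asynchronous caller
 * creates an operation, fills in the request payload, and sends it with a
 * completion callback; the callback runs in workqueue context once the final
 * result has been set. The names "example_complete", "EXAMPLE_TYPE_FOO" and
 * the req/rsp structures are hypothetical.
 *
 *      static void example_complete(struct gb_operation *op)
 *      {
 *              int ret = gb_operation_result(op);
 *
 *              if (ret)
 *                      dev_err(&op->connection->hd->dev,
 *                              "example request failed: %d\n", ret);
 *              gb_operation_put(op);   // drop the caller's reference
 *      }
 *
 *      op = gb_operation_create(connection, EXAMPLE_TYPE_FOO,
 *                               sizeof(*req), sizeof(*rsp), GFP_KERNEL);
 *      memcpy(op->request->payload, req, sizeof(*req));
 *      ret = gb_operation_request_send(op, example_complete, 1000,
 *                                      GFP_KERNEL);
 *      if (ret)
 *              gb_operation_put(op);
 */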

/*
 * Send a synchronous operation. This function is expected to
 * block, returning only when the response has arrived (or when an
 * error is detected). The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
                                           unsigned int timeout)
{
        int ret;

        ret = gb_operation_request_send(operation, gb_operation_sync_callback,
                                        timeout, GFP_KERNEL);
        if (ret)
                return ret;

        ret = wait_for_completion_interruptible(&operation->completion);
        if (ret < 0) {
                /* Cancel the operation if interrupted */
                gb_operation_cancel(operation, -ECANCELED);
        }

        return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request. A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message. Otherwise it
 * can simply supply the result errno; this function will allocate
 * the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
                                      int errno)
{
        struct gb_connection *connection = operation->connection;
        int ret;

        if (!operation->response &&
            !gb_operation_is_unidirectional(operation)) {
                if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
                        return -ENOMEM;
        }

        /* Record the result */
        if (!gb_operation_result_set(operation, errno)) {
                dev_err(&connection->hd->dev, "request result already set\n");
                return -EIO;    /* Shouldn't happen */
        }

        /* Sender of request does not care about response. */
        if (gb_operation_is_unidirectional(operation))
                return 0;

        /* Reference will be dropped when message has been sent. */
        gb_operation_get(operation);
        ret = gb_operation_get_active(operation);
        if (ret)
                goto err_put;

        /* Fill in the response header and send it */
        operation->response->header->result = gb_operation_errno_map(errno);

        ret = gb_message_send(operation->response, GFP_KERNEL);
        if (ret)
                goto err_put_active;

        return 0;

err_put_active:
        gb_operation_put_active(operation);
err_put:
        gb_operation_put(operation);

        return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
                          struct gb_message *message, int status)
{
        struct gb_operation *operation = message->operation;
        struct gb_connection *connection = operation->connection;

        /*
         * If the message was a response, we just need to drop our
         * reference to the operation. If an error occurred, report
         * it.
         *
         * For requests, if there's no error and the operation is not
         * unidirectional, there's nothing more to do until the response
         * arrives. If an error occurred attempting to send it, or if the
         * operation is unidirectional, record the result of the operation and
         * schedule its completion.
         */
        if (message == operation->response) {
                if (status) {
                        dev_err(&connection->hd->dev,
                                "%s: error sending response 0x%02x: %d\n",
                                connection->name, operation->type, status);
                }

                gb_operation_put_active(operation);
                gb_operation_put(operation);
        } else if (status || gb_operation_is_unidirectional(operation)) {
                if (gb_operation_result_set(operation, status)) {
                        queue_work(gb_operation_completion_wq,
                                   &operation->work);
                }
        }
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
                                       const struct gb_operation_msg_hdr *header,
                                       void *data, size_t size)
{
        struct gb_operation *operation;
        u16 operation_id;
        u8 type;
        int ret;

        operation_id = le16_to_cpu(header->operation_id);
        type = header->type;

        operation = gb_operation_create_incoming(connection, operation_id,
                                                 type, data, size);
        if (!operation) {
                dev_err(&connection->hd->dev,
                        "%s: can't create incoming operation\n",
                        connection->name);
                return;
        }

        ret = gb_operation_get_active(operation);
        if (ret) {
                gb_operation_put(operation);
                return;
        }
        trace_gb_message_recv_request(operation->request);

        /*
         * The initial reference to the operation will be dropped when the
         * request handler returns.
         */
        if (gb_operation_result_set(operation, -EINPROGRESS))
                queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message. Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
                                        const struct gb_operation_msg_hdr *header,
                                        void *data, size_t size)
{
        struct gb_operation *operation;
        struct gb_message *message;
        size_t message_size;
        u16 operation_id;
        int errno;

        operation_id = le16_to_cpu(header->operation_id);

        if (!operation_id) {
                dev_err_ratelimited(&connection->hd->dev,
                                    "%s: invalid response id 0 received\n",
                                    connection->name);
                return;
        }

        operation = gb_operation_find_outgoing(connection, operation_id);
        if (!operation) {
                dev_err_ratelimited(&connection->hd->dev,
                                    "%s: unexpected response id 0x%04x received\n",
                                    connection->name, operation_id);
                return;
        }

        errno = gb_operation_status_map(header->result);
        message = operation->response;
        message_size = sizeof(*header) + message->payload_size;
        if (!errno && size > message_size) {
                dev_err_ratelimited(&connection->hd->dev,
                                    "%s: malformed response 0x%02x received (%zu > %zu)\n",
                                    connection->name, header->type,
                                    size, message_size);
                errno = -EMSGSIZE;
        } else if (!errno && size < message_size) {
                if (gb_operation_short_response_allowed(operation)) {
                        message->payload_size = size - sizeof(*header);
                } else {
                        dev_err_ratelimited(&connection->hd->dev,
                                            "%s: short response 0x%02x received (%zu < %zu)\n",
                                            connection->name, header->type,
                                            size, message_size);
                        errno = -EMSGSIZE;
                }
        }

        /* We must ignore the payload if a bad status is returned */
        if (errno)
                size = sizeof(*header);

        /* The rest will be handled in work queue context */
        if (gb_operation_result_set(operation, errno)) {
                memcpy(message->buffer, data, size);

                trace_gb_message_recv_response(message);

                queue_work(gb_operation_completion_wq, &operation->work);
        }

        gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection. As soon as we return the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
                        void *data, size_t size)
{
        struct gb_operation_msg_hdr header;
        struct device *dev = &connection->hd->dev;
        size_t msg_size;

        if (connection->state == GB_CONNECTION_STATE_DISABLED ||
            gb_connection_is_offloaded(connection)) {
                dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
                                     connection->name, size);
                return;
        }

        if (size < sizeof(header)) {
                dev_err_ratelimited(dev, "%s: short message received\n",
                                    connection->name);
                return;
        }

        /* Use memcpy as data may be unaligned */
        memcpy(&header, data, sizeof(header));
        msg_size = le16_to_cpu(header.size);
        if (size < msg_size) {
                dev_err_ratelimited(dev,
                                    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
                                    connection->name,
                                    le16_to_cpu(header.operation_id),
                                    header.type, size, msg_size);
                return;         /* XXX Should still complete operation */
        }

        if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
                gb_connection_recv_response(connection, &header, data,
                                            msg_size);
        } else {
                gb_connection_recv_request(connection, &header, data,
                                           msg_size);
        }
}
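
/*
 * Illustrative sketch (not part of the original file): a Greybus
 * host-controller driver hands each received message to the core by calling
 * gb_connection_recv() from its receive path, which may run in interrupt
 * context. The connection-lookup helper and buffer names below are
 * hypothetical.
 *
 *      // in a host driver's per-cport receive handler
 *      connection = example_lookup_connection(hd, cport_id);
 *      if (connection)
 *              gb_connection_recv(connection, rx_buf, rx_len);
 */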

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
        if (WARN_ON(gb_operation_is_incoming(operation)))
                return;

        if (gb_operation_result_set(operation, errno)) {
                gb_message_cancel(operation->request);
                queue_work(gb_operation_completion_wq, &operation->work);
        }
        trace_gb_message_cancel_outgoing(operation->request);

        atomic_inc(&operation->waiters);
        wait_event(gb_operation_cancellation_queue,
                   !gb_operation_is_active(operation));
        atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously. Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
        if (WARN_ON(!gb_operation_is_incoming(operation)))
                return;

        if (!gb_operation_is_unidirectional(operation)) {
                /*
                 * Make sure the request handler has submitted the response
                 * before cancelling it.
                 */
                flush_work(&operation->work);
                if (!gb_operation_result_set(operation, errno))
                        gb_message_cancel(operation->response);
        }
        trace_gb_message_cancel_incoming(operation->response);

        atomic_inc(&operation->waiters);
        wait_event(gb_operation_cancellation_queue,
                   !gb_operation_is_active(operation));
        atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response.
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation. It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs. @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
                              void *request, int request_size,
                              void *response, int response_size,
                              unsigned int timeout)
{
        struct gb_operation *operation;
        int ret;

        if ((response_size && !response) ||
            (request_size && !request))
                return -EINVAL;

        operation = gb_operation_create(connection, type,
                                        request_size, response_size,
                                        GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        if (request_size)
                memcpy(operation->request->payload, request, request_size);

        ret = gb_operation_request_send_sync_timeout(operation, timeout);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
                        connection->name, operation->id, type, ret);
        } else {
                if (response_size) {
                        memcpy(response, operation->response->payload,
                               response_size);
                }
        }

        gb_operation_put(operation);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
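
/*
 * Illustrative sketch (not part of the original file): most protocol drivers
 * wrap their wire format in small packed structs and issue them through
 * gb_operation_sync_timeout(). The request/response structures and the
 * operation type below are hypothetical.
 *
 *      struct example_req req = { .flags = cpu_to_le32(0) };
 *      struct example_rsp rsp;
 *      int ret;
 *
 *      ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_GET,
 *                                      &req, sizeof(req),
 *                                      &rsp, sizeof(rsp), 1000);
 *      if (ret)
 *              return ret;     // rsp is left untouched on error
 */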

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection: connection to use
 * @type: type of operation to send
 * @request: memory buffer to copy the request from
 * @request_size: size of @request
 * @timeout: send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
                                        int type, void *request,
                                        int request_size,
                                        unsigned int timeout)
{
        struct gb_operation *operation;
        int ret;

        if (request_size && !request)
                return -EINVAL;

        operation = gb_operation_create_flags(connection, type,
                                              request_size, 0,
                                              GB_OPERATION_FLAG_UNIDIRECTIONAL,
                                              GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        if (request_size)
                memcpy(operation->request->payload, request, request_size);

        ret = gb_operation_request_send_sync_timeout(operation, timeout);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: unidirectional operation of type 0x%02x failed: %d\n",
                        connection->name, type, ret);
        }

        gb_operation_put(operation);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);

int __init gb_operation_init(void)
{
        gb_message_cache = kmem_cache_create("gb_message_cache",
                                             sizeof(struct gb_message), 0, 0,
                                             NULL);
        if (!gb_message_cache)
                return -ENOMEM;

        gb_operation_cache = kmem_cache_create("gb_operation_cache",
                                               sizeof(struct gb_operation), 0,
                                               0, NULL);
        if (!gb_operation_cache)
                goto err_destroy_message_cache;

        gb_operation_completion_wq = alloc_workqueue("greybus_completion",
                                                     0, 0);
        if (!gb_operation_completion_wq)
                goto err_destroy_operation_cache;

        return 0;

err_destroy_operation_cache:
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
err_destroy_message_cache:
        kmem_cache_destroy(gb_message_cache);
        gb_message_cache = NULL;

        return -ENOMEM;
}

void gb_operation_exit(void)
{
        destroy_workqueue(gb_operation_completion_wq);
        gb_operation_completion_wq = NULL;
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
        kmem_cache_destroy(gb_message_cache);
        gb_message_cache = NULL;
}