// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

/* Timeout for a blocking rpmh_write(): ten seconds, expressed in jiffies */
#define RPMH_TIMEOUT_MS			msecs_to_jiffies(10000)

/* Build a fully initialised struct rpmh_request on the caller's stack */
#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = device,				\
		.needs_free = false,			\
	}

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	rpm_msg->msg.state = state;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands; the order of commands is maintained
 * and they will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;

	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
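
/*
 * Illustrative usage of rpmh_write_async() (a sketch, not part of this
 * driver; the device pointer and the resource address/data are hypothetical):
 *
 *	struct tcs_cmd cmd = { .addr = 0x50000, .data = 0x1 };
 *
 *	ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *
 * The call returns without waiting for the accelerator to finish; the
 * heap-allocated request is freed in rpmh_tx_done() when the TX completes.
 */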

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_cmds = n;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
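
/*
 * Illustrative usage of rpmh_write() (a sketch, not part of this driver;
 * the address and data are hypothetical):
 *
 *	struct tcs_cmd cmd = { .addr = 0x50000, .data = 0x1 };
 *	int ret;
 *
 *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	if (ret)
 *		dev_err(dev, "active vote failed: %d\n", ret);
 *
 * The caller blocks until rpmh_tx_done() signals the on-stack completion,
 * or until RPMH_TIMEOUT_MS expires.
 */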

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}

	return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of count of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;

	if (!count)
		return -EINVAL;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
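
/*
 * Illustrative usage of rpmh_write_batch() (a sketch, not part of this
 * driver; addresses, data and batch sizes are hypothetical). @n is
 * zero-terminated and gives the number of commands in each batch:
 *
 *	struct tcs_cmd cmds[] = {
 *		{ .addr = 0x50000, .data = 0x1 },
 *		{ .addr = 0x50004, .data = 0x2 },
 *		{ .addr = 0x50008, .data = 0x3 },
 *	};
 *	u32 n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 *
 * With RPMH_SLEEP_STATE or RPMH_WAKE_ONLY_STATE the batch is only cached
 * here and is written to the hardware later by rpmh_flush().
 */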

static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor. If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto exit;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

exit:
	spin_unlock(&ctrlr->cache_lock);

	return ret;
}
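
/*
 * Illustrative call site for rpmh_flush() (a sketch, not taken from this
 * file): the RSC driver is expected to call it from its "last CPU going
 * idle" path with interrupts disabled, aborting the low-power entry if the
 * flush cannot proceed:
 *
 *	ret = rpmh_flush(&drv->client);
 *	if (ret)
 *		return ret;
 */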

/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
void rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL(rpmh_invalidate);
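
/*
 * Illustrative usage of rpmh_invalidate() (a sketch, not part of this
 * driver; "sleep_cmds" and "sleep_n" are hypothetical): a client typically
 * drops its stale batch votes before writing a fresh sleep/wake set:
 *
 *	rpmh_invalidate(dev);
 *	ret = rpmh_write_batch(dev, RPMH_SLEEP_STATE, sleep_cmds, sleep_n);
 */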