svc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */

#include <linux/debugfs.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#define SVC_INTF_EJECT_TIMEOUT		9000
#define SVC_INTF_ACTIVATE_TIMEOUT	6000
#define SVC_INTF_RESUME_TIMEOUT		3000

struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);

static ssize_t endo_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

// FIXME
// This is a hack, we need to do this "right" and clean the interface up
// properly, not just forcibly yank the thing out of the system and hope for the
// best. But for now, people want their modules to come out without having to
// throw the thing to the ground or get out a screwdriver.
static ssize_t intf_eject_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	unsigned short intf_id;
	int ret;

	ret = kstrtou16(buf, 10, &intf_id);
	if (ret < 0)
		return ret;

	dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);

	ret = gb_svc_intf_eject(svc, intf_id);
	if (ret < 0)
		return ret;

	return len;
}
static DEVICE_ATTR_WO(intf_eject);

static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%s\n",
		       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
}

static ssize_t watchdog_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	int retval;
	bool user_request;

	retval = strtobool(buf, &user_request);
	if (retval)
		return retval;

	if (user_request)
		retval = gb_svc_watchdog_enable(svc);
	else
		retval = gb_svc_watchdog_disable(svc);
	if (retval)
		return retval;
	return len;
}
static DEVICE_ATTR_RW(watchdog);

static ssize_t watchdog_action_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
		return sprintf(buf, "panic\n");
	else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
		return sprintf(buf, "reset\n");

	return -EINVAL;
}

static ssize_t watchdog_action_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (sysfs_streq(buf, "panic"))
		svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
	else if (sysfs_streq(buf, "reset"))
		svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
	else
		return -EINVAL;

	return len;
}
static DEVICE_ATTR_RW(watchdog_action);
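
/*
 * Power-monitor helpers: query the SVC for the number of power rails, their
 * names, and voltage/current/power samples for a given rail or interface.
 */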
static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	*value = response.rail_count;

	return 0;
}

static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	if (response->status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev,
			"SVC error while getting rail names: %u\n",
			response->status);
		return -EREMOTEIO;
	}

	return 0;
}

static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
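
/* Tell the SVC which device id has been assigned to the given interface. */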
int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}
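
/* Request ejection of the given interface; uses an extended operation timeout. */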
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
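
/*
 * Enable or disable an interface's VSYS power, reference clock and UniPro
 * link, respectively.
 */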
int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_vsys_request request;
	struct gb_svc_intf_vsys_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_VSYS_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_VSYS_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_refclk_request request;
	struct gb_svc_intf_refclk_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_REFCLK_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_unipro_request request;
	struct gb_svc_intf_unipro_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
		return -EREMOTEIO;
	return 0;
}
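
/*
 * Activate an interface. On success the interface type reported by the SVC
 * is returned through @intf_type.
 */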
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_ACTIVATE,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}

int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_resume_request request;
	struct gb_svc_intf_resume_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_RESUME,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_RESUME_TIMEOUT);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
			intf_id, ret);
		return ret;
	}

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	return 0;
}
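
/*
 * Read and write DME attributes of an interface's UniPro peer. Both the
 * Greybus operation status and the UniPro result code are checked.
 */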
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
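
/*
 * Create and destroy a connection between two CPorts. New connections are
 * always created on traffic class 0.
 */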
int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     u8 cport_flags)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	request.tc = 0;		/* TC0 */
	request.flags = cport_flags;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}

/* Creates bi-directional routes between the devices */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}
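
/*
 * Change the UniPro power mode of an interface's link. Optional local and
 * remote L2 timer configurations are included in the request when provided.
 */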
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);

int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0) {
		dev_err(&svc->dev,
			"failed to send set power mode operation to interface %u: %d\n",
			intf_id, ret);
		return ret;
	}

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
		dev_err(&svc->dev,
			"failed to hibernate the link for interface %u: %u\n",
			intf_id, result_code);
		return -EIO;
	}

	return 0;
}

int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
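
/*
 * Handle the protocol-version request sent by the SVC: reject unsupported
 * major versions and report the accepted version back in the response.
 */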
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}

static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};
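
/*
 * Create a "pwrmon" debugfs directory with one subdirectory per power rail,
 * each containing voltage_now, current_now and power_now files. The
 * directory is removed again if any step fails.
 */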
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}

static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}

static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
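
/*
 * Handle the SVC hello request: record the endo and AP interface ids,
 * register the svc device, set up the watchdog and debugfs entries, and
 * queue the remaining link setup as a deferred request.
 */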
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_unregister_device;
	}

	gb_svc_debugfs_init(svc);

	ret = gb_svc_queue_deferred_request(op);
	if (ret)
		goto err_remove_debugfs;

	return 0;

err_remove_debugfs:
	gb_svc_debugfs_exit(svc);
err_unregister_device:
	gb_svc_watchdog_destroy(svc);
	device_del(&svc->dev);
	return ret;
}
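
/*
 * Interface ids are numbered consecutively starting at a module's id, so an
 * interface belongs to the module whose id range covers it. These helpers
 * resolve interface and module ids to their objects.
 */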
static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
						    u8 intf_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;

	list_for_each_entry(module, &hd->modules, hd_node) {
		module_id = module->module_id;
		num_interfaces = module->num_interfaces;

		if (intf_id >= module_id &&
		    intf_id < module_id + num_interfaces) {
			return module->interfaces[intf_id - module_id];
		}
	}

	return NULL;
}

static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;

	list_for_each_entry(module, &hd->modules, hd_node) {
		if (module->module_id == module_id)
			return module;
	}

	return NULL;
}

static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);
	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}

static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}

static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}

static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}
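
/*
 * Deferred request handling: requests that need more work than the request
 * handler can do directly are queued on the svc workqueue and dispatched
 * here by operation type. The operation reference taken when queueing is
 * dropped after processing.
 */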
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}

static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);
	queue_work(svc->wq, &dr->work);

	return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_module_inserted_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_inserted_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_module_removed_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_removed_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_oops_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_oops_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_mailbox_event_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}
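
/*
 * Dispatch incoming SVC requests, enforcing the expected request order via
 * svc->state before calling the per-type handlers above.
 */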
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially)
	 * and the code below takes care of enforcing that. The expected order
	 * is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but only after the two above.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}

struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
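
/*
 * Allocate and initialize the svc structure, its workqueue and its static
 * connection on the SVC CPort. The svc device itself is registered later,
 * from the hello request handler.
 */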
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_put_device;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_put_device:
	put_device(&svc->dev);
	return NULL;
}

int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven by the SVC, so the SVC device
	 * is added from the connection request handler when enough
	 * information has been received.
	 */
	ret = gb_connection_enable(svc->connection);
	if (ret)
		return ret;

	return 0;
}

static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}
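
/*
 * Stop processing incoming SVC requests and tear down: unregister the svc
 * device if it was added, flush the workqueue, remove all modules and
 * disable the connection.
 */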
void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}

void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}