// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");
/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);
static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}

	return 0;
}
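
/*
 * Illustrative sketch (not part of this file): a serio driver advertises the
 * ports it can handle through an id table that serio_match_port() walks.
 * SERIO_ANY in any field acts as a wildcard, and the table is terminated by
 * an all-zero entry. The names below ("example_serio_ids") are made up for
 * illustration only.
 *
 *	static const struct serio_device_id example_serio_ids[] = {
 *		{
 *			.type	= SERIO_8042,
 *			.proto	= SERIO_ANY,
 *			.id	= SERIO_ANY,
 *			.extra	= SERIO_ANY,
 *		},
 *		{ 0 }
 *	};
 *	MODULE_DEVICE_TABLE(serio, example_serio_ids);
 */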
/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;

		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0 && error != -EPROBE_DEFER)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}


/*
 * Serio event processing.
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);
static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan event list for the other events for the same serio port,
	 * starting with the most recent one. If the event is the same we
	 * do not need to add a new one. If the event is of a different type
	 * we need to add this event and should not look further because
	 * we need to preserve the sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		pr_warn("Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}
/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by driver's connect() handler so there can't be a
 * grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
			serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}
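
/*
 * The "drvctl" attribute below accepts, via sysfs, one of the literal strings
 * "none" (unbind the current driver), "reconnect" (reconnect the port and its
 * children), "rescan" (unbind and look for a driver again), or the name of a
 * registered serio driver to bind the port to explicitly.
 */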
static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	error = mutex_lock_interruptible(&serio_mutex);
	if (error)
		return error;

	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_subtree(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		error = serio_bind_driver(serio, to_serio_driver(drv));
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else {
		error = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return error ? error : count;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static const struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static const struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};

static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(-1);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%lu",
		     (unsigned long)atomic_inc_return(&serio_no));
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}
/*
 * Complete serio port registration.
 * Driver core will attempt to find an appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		serio_pause_rx(parent);
		list_add_tail(&serio->child_node, &parent->children);
		serio_continue_rx(parent);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes the unregistration process and removes
 * the port from the system.
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		list_del_init(&serio->child_node);
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}
/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do a full rescan in
 * the hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}
/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to do the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue reconnecting starting with
		 * the next sibling of the parent node.
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}
/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we traverse the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}
void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);
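
/*
 * Illustrative sketch (not part of this file): a port driver typically
 * allocates a struct serio with kzalloc(), fills in the identity and
 * callbacks, and registers it through the serio_register_port() wrapper
 * from <linux/serio.h>, which supplies THIS_MODULE as the owner.
 * Registration then completes asynchronously in serio_handle_event().
 * The "example_*" names below are made up.
 *
 *	struct serio *serio = kzalloc(sizeof(*serio), GFP_KERNEL);
 *	if (!serio)
 *		return -ENOMEM;
 *
 *	serio->id.type = SERIO_8042;
 *	serio->write   = example_write;
 *	serio->start   = example_start;
 *	strscpy(serio->name, "Example port", sizeof(serio->name));
 *	strscpy(serio->phys, "example/serio0", sizeof(serio->phys));
 *
 *	serio_register_port(serio);
 */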
/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports, if any are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	mutex_lock(&serio_mutex);
	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);
/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}
static DRIVER_ATTR_RW(bind_mode);

static struct attribute *serio_driver_attrs[] = {
	&driver_attr_description.attr,
	&driver_attr_bind_mode.attr,
	NULL,
};
ATTRIBUTE_GROUPS(serio_driver);

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static int serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
	return 0;
}

static void serio_cleanup(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->cleanup)
		serio->drv->cleanup(serio);
	mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		pr_warn("driver_attach() failed for %s with error %d\n",
			drv->driver.name, error);
}
int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	bool manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = true;

	error = driver_register(&drv->driver);
	if (error) {
		pr_err("driver_register() failed for %s, error: %d\n",
			drv->driver.name, error);
		return error;
	}

	/*
	 * Restore original bind mode and let kseriod bind the
	 * driver to free ports.
	 */
	if (!manual_bind) {
		drv->manual_bind = false;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__serio_register_driver);
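
/*
 * Illustrative sketch (not part of this file): a minimal serio driver
 * registers through the serio_register_driver()/module_serio_driver()
 * helpers from <linux/serio.h>, which funnel into __serio_register_driver()
 * above. connect() is expected to call serio_open() and disconnect() to call
 * serio_close(). All "example_*" names are made up.
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= {
 *			.name	= "example",
 *		},
 *		.description	= "Example serio driver",
 *		.id_table	= example_serio_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *	module_serio_driver(example_drv);
 */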
void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);

	drv->manual_bind = true;	/* so serio_find_driver ignores it */
	serio_remove_pending_events(drv);

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);

	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);

static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)			\
	do {							\
		int err = add_uevent_var(env, fmt, val);	\
		if (err)					\
			return err;				\
	} while (0)

static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
				serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR
#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	int error = -ENOENT;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->fast_reconnect) {
		error = serio->drv->fast_reconnect(serio);
		if (error && error != -ENOENT)
			dev_warn(dev, "fast reconnect failed with error %d\n",
				 error);
	}
	mutex_unlock(&serio->drv_mutex);

	if (error) {
		/*
		 * Driver reconnect can take a while, so better let
		 * kseriod deal with it.
		 */
		serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
	}

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);
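
/*
 * Illustrative sketch (not part of this file): a serio driver's connect()
 * handler typically binds to the port with serio_open() before enabling the
 * attached device, and disconnect() undoes it with serio_close(). The
 * "example_*" names and elided setup are made up for illustration.
 *
 *	static int example_connect(struct serio *serio, struct serio_driver *drv)
 *	{
 *		int error;
 *
 *		error = serio_open(serio, drv);
 *		if (error)
 *			return error;
 *
 *		// initialize the attached device, register an input device, etc.
 *		return 0;
 *	}
 *
 *	static void example_disconnect(struct serio *serio)
 *	{
 *		serio_close(serio);
 *	}
 */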
irqreturn_t serio_interrupt(struct serio *serio,
		unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}
EXPORT_SYMBOL(serio_interrupt);
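
/*
 * Illustrative sketch (not part of this file): a port driver feeds received
 * bytes to the bound serio driver through serio_interrupt(), passing flags
 * such as SERIO_PARITY or SERIO_TIMEOUT in dfl when the hardware reports an
 * error. The "example_*" helpers and register reads are made up.
 *
 *	static irqreturn_t example_port_isr(int irq, void *dev_id)
 *	{
 *		struct serio *serio = dev_id;
 *		unsigned int dfl = 0;
 *		unsigned char data = example_read_data_register();
 *
 *		if (example_parity_error())
 *			dfl |= SERIO_PARITY;
 *
 *		return serio_interrupt(serio, data, dfl);
 *	}
 */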
struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		pr_err("Failed to register serio bus, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);

	/*
	 * There should not be any outstanding events but work may
	 * still be scheduled so simply cancel it.
	 */
	cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);