// SPDX-License-Identifier: GPL-2.0-only
/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * If you ever run into a situation in which you have a SW_ type rfkill
 * input device, then you can revive code that was removed in the patch
 * "rfkill-input: remove unused code".
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill.h"

enum rfkill_input_master_mode {
	RFKILL_INPUT_MASTER_UNLOCK = 0,
	RFKILL_INPUT_MASTER_RESTORE = 1,
	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
	NUM_RFKILL_INPUT_MASTER_MODES
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
					RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
	"SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");
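
/*
 * State shared with the delayed work below; rfkill_op_lock serializes
 * all accesses. A pending global operation takes precedence over the
 * per-type toggle bitmaps.
 */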
static spinlock_t rfkill_op_lock;
static bool rfkill_op_pending;
static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];

enum rfkill_sched_op {
	RFKILL_GLOBAL_OP_EPO = 0,
	RFKILL_GLOBAL_OP_RESTORE,
	RFKILL_GLOBAL_OP_UNLOCK,
	RFKILL_GLOBAL_OP_UNBLOCK,
};

static enum rfkill_sched_op rfkill_master_switch_op;
static enum rfkill_sched_op rfkill_op;
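
/*
 * Carry out one scheduled global operation. Called from the work handler
 * with rfkill_op_lock dropped; an unrecognized op fails safe by forcing
 * an emergency power off.
 */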
static void __rfkill_handle_global_op(enum rfkill_sched_op op)
{
	unsigned int i;

	switch (op) {
	case RFKILL_GLOBAL_OP_EPO:
		rfkill_epo();
		break;
	case RFKILL_GLOBAL_OP_RESTORE:
		rfkill_restore_states();
		break;
	case RFKILL_GLOBAL_OP_UNLOCK:
		rfkill_remove_epo_lock();
		break;
	case RFKILL_GLOBAL_OP_UNBLOCK:
		rfkill_remove_epo_lock();
		for (i = 0; i < NUM_RFKILL_TYPES; i++)
			rfkill_switch_all(i, false);
		break;
	default:
		/* memory corruption or bug, fail safely */
		rfkill_epo();
		WARN(1, "Unknown requested operation %d! "
			"rfkill Emergency Power Off activated\n",
			op);
	}
}
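
/*
 * Toggle or re-assert the software block for a single rfkill type, based
 * on the global state recorded for that type.
 */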
static void __rfkill_handle_normal_op(const enum rfkill_type type,
				      const bool complement)
{
	bool blocked;

	blocked = rfkill_get_global_sw_state(type);
	if (complement)
		blocked = !blocked;

	rfkill_switch_all(type, blocked);
}
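
/*
 * Delayed-work handler: service a pending global operation first, then
 * any per-type toggles, dropping rfkill_op_lock around the actual rfkill
 * calls and re-checking for new global ops afterwards. Per-type toggles
 * are skipped while the EPO lock is active.
 */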
static void rfkill_op_handler(struct work_struct *work)
{
	unsigned int i;
	bool c;

	spin_lock_irq(&rfkill_op_lock);
	do {
		if (rfkill_op_pending) {
			enum rfkill_sched_op op = rfkill_op;
			rfkill_op_pending = false;
			memset(rfkill_sw_pending, 0,
				sizeof(rfkill_sw_pending));
			spin_unlock_irq(&rfkill_op_lock);

			__rfkill_handle_global_op(op);

			spin_lock_irq(&rfkill_op_lock);

			/*
			 * handle global ops first -- during unlocked period
			 * we might have gotten a new global op.
			 */
			if (rfkill_op_pending)
				continue;
		}

		if (rfkill_is_epo_lock_active())
			continue;

		for (i = 0; i < NUM_RFKILL_TYPES; i++) {
			if (__test_and_clear_bit(i, rfkill_sw_pending)) {
				c = __test_and_clear_bit(i, rfkill_sw_state);
				spin_unlock_irq(&rfkill_op_lock);

				__rfkill_handle_normal_op(i, c);

				spin_lock_irq(&rfkill_op_lock);
			}
		}
	} while (rfkill_op_pending);
	spin_unlock_irq(&rfkill_op_lock);
}

static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
static unsigned long rfkill_last_scheduled;
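
/*
 * Returns 0 if at least RFKILL_OPS_DELAY ms have passed since @last,
 * otherwise the full delay in jiffies.
 */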
static unsigned long rfkill_ratelimit(const unsigned long last)
{
	const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
	return time_after(jiffies, last + delay) ? 0 : delay;
}

static void rfkill_schedule_ratelimited(void)
{
	if (schedule_delayed_work(&rfkill_op_work,
				  rfkill_ratelimit(rfkill_last_scheduled)))
		rfkill_last_scheduled = jiffies;
}
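
/*
 * Schedule a global operation, replacing any previously pending one.
 * EPO bypasses the rate limiter unless the EPO lock is already active.
 */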
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
	unsigned long flags;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	rfkill_op = op;
	rfkill_op_pending = true;
	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
		/* bypass the limiter for EPO */
		mod_delayed_work(system_wq, &rfkill_op_work, 0);
		rfkill_last_scheduled = jiffies;
	} else
		rfkill_schedule_ratelimited();
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
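
/*
 * Record a per-type toggle request and schedule the work, unless the
 * EPO lock is active or a global operation is already pending (which
 * would override the toggle anyway).
 */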
static void rfkill_schedule_toggle(enum rfkill_type type)
{
	unsigned long flags;

	if (rfkill_is_epo_lock_active())
		return;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	if (!rfkill_op_pending) {
		__set_bit(type, rfkill_sw_pending);
		__change_bit(type, rfkill_sw_state);
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
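
/*
 * SW_RFKILL_ALL: "radios enabled" schedules the configured master-switch
 * operation, "radios disabled" schedules an emergency power off.
 */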
static void rfkill_schedule_evsw_rfkillall(int state)
{
	if (state)
		rfkill_schedule_global_op(rfkill_master_switch_op);
	else
		rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}
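
/*
 * Input event callback: key presses toggle the matching rfkill type,
 * KEY_RFKILL toggles all types, and SW_RFKILL_ALL drives the master
 * switch handling above.
 */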
static void rfkill_event(struct input_handle *handle, unsigned int type,
			 unsigned int code, int data)
{
	if (type == EV_KEY && data == 1) {
		switch (code) {
		case KEY_WLAN:
			rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
			break;
		case KEY_BLUETOOTH:
			rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
			break;
		case KEY_UWB:
			rfkill_schedule_toggle(RFKILL_TYPE_UWB);
			break;
		case KEY_WIMAX:
			rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
			break;
		case KEY_RFKILL:
			rfkill_schedule_toggle(RFKILL_TYPE_ALL);
			break;
		}
	} else if (type == EV_SW && code == SW_RFKILL_ALL)
		rfkill_schedule_evsw_rfkillall(data);
}
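
/*
 * Attach to a matching input device: allocate a handle, register it
 * (which triggers rfkill_start()) and open the device so we start
 * receiving events.
 */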
static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
			  const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "rfkill";

	/* causes rfkill_start() to be called */
	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	return 0;

err_unregister_handle:
	input_unregister_handle(handle);
err_free_handle:
	kfree(handle);
	return error;
}

static void rfkill_start(struct input_handle *handle)
{
	/*
	 * Take event_lock to guard against configuration changes; we
	 * should be able to deal with concurrency with rfkill_event()
	 * just fine (which event_lock will also prevent).
	 */
	spin_lock_irq(&handle->dev->event_lock);

	if (test_bit(EV_SW, handle->dev->evbit) &&
	    test_bit(SW_RFKILL_ALL, handle->dev->swbit))
		rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
							handle->dev->sw));

	spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
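
/* Match devices that can emit any of the rfkill keys or SW_RFKILL_ALL */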
static const struct input_device_id rfkill_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
		.evbit = { BIT(EV_SW) },
		.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
	},
	{ }
};

static struct input_handler rfkill_handler = {
	.name = "rfkill",
	.event = rfkill_event,
	.connect = rfkill_connect,
	.start = rfkill_start,
	.disconnect = rfkill_disconnect,
	.id_table = rfkill_ids,
};
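
/*
 * Translate the module parameter into a scheduled operation, prime the
 * rate limiter so the first event is not delayed, and register the
 * input handler.
 */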
int __init rfkill_handler_init(void)
{
	switch (rfkill_master_switch_mode) {
	case RFKILL_INPUT_MASTER_UNBLOCKALL:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
		break;
	case RFKILL_INPUT_MASTER_RESTORE:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
		break;
	case RFKILL_INPUT_MASTER_UNLOCK:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_init(&rfkill_op_lock);

	/* Avoid delay at first schedule */
	rfkill_last_scheduled =
			jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
	return input_register_handler(&rfkill_handler);
}

void __exit rfkill_handler_exit(void)
{
	input_unregister_handler(&rfkill_handler);
	cancel_delayed_work_sync(&rfkill_op_work);
}