uverbs_main.c

/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/rdma_netlink.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UVERBS_MAJOR = 231,
	IB_UVERBS_BASE_MINOR = 192,
	IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
	IB_UVERBS_NUM_FIXED_MINOR = 32,
	IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
	/*
	 * We do not hold the hw_destroy_rwsem lock for this flow, instead
	 * srcu is used. It does not matter if someone races this with
	 * get_context, we get NULL or valid ucontext.
	 */
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);

	return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
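
/*
 * Tear down a memory window created through uverbs: ask the driver to
 * deallocate it and, only on success, drop the reference it held on its PD
 * and free the kernel object.
 */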
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->ops.dealloc_mw(mw);
	if (ret)
		return ret;

	atomic_dec(&pd->usecnt);
	kfree(mw);
	return ret;
}

static void ib_uverbs_release_dev(struct device *device)
{
	struct ib_uverbs_device *dev =
		container_of(device, struct ib_uverbs_device, dev);

	uverbs_destroy_api(dev->uapi);
	cleanup_srcu_struct(&dev->disassociate_srcu);
	mutex_destroy(&dev->lists_mutex);
	mutex_destroy(&dev->xrcd_tree_mutex);
	kfree(dev);
}

void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	ib_uverbs_release_uevent(&uobj->uevent);
}

void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
	struct ib_uverbs_async_event_file *async_file = uobj->event_file;
	struct ib_uverbs_event *evt, *tmp;

	if (!async_file)
		return;

	spin_lock_irq(&async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&async_file->ev_queue.lock);
	uverbs_uobject_put(&async_file->uobj);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}

void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	release_ufile_idr_uobject(file);

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->ops.owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	if (file->default_async_file)
		uverbs_uobject_put(&file->default_async_file->uobj);
	put_device(&file->device->dev);

	if (file->disassociate_page)
		__free_pages(file->disassociate_page, 0);
	mutex_destroy(&file->umap_lock);
	mutex_destroy(&file->ucontext_lock);
	kfree(file);
}
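
/*
 * Common read() backend for the completion and async event files: sleep
 * (unless O_NONBLOCK) until an event of size 'eventsz' is queued, dequeue
 * it, copy it to userspace and bump the per-object reported counter.
 */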
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
					      ev_queue->is_closed)))
			return -ERESTARTSYS;

		spin_lock_irq(&ev_queue->lock);

		/* If device was disassociated and no event exists set an error */
		if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}

static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
				    pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}

static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	else if (ev_queue->is_closed)
		pollflags = EPOLLERR;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}

const struct file_operations uverbs_event_fops = {
	.owner   = THIS_MODULE,
	.read    = ib_uverbs_comp_event_read,
	.poll    = ib_uverbs_comp_event_poll,
	.release = uverbs_uobject_fd_release,
	.fasync  = ib_uverbs_comp_event_fasync,
	.llseek  = no_llseek,
};

const struct file_operations uverbs_async_event_fops = {
	.owner   = THIS_MODULE,
	.read    = ib_uverbs_async_event_read,
	.poll    = ib_uverbs_async_event_poll,
	.release = uverbs_async_event_release,
	.fasync  = ib_uverbs_async_event_fasync,
	.llseek  = no_llseek,
};
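
/*
 * CQ completion callback: queue a completion event on the CQ's event queue
 * (unless it is being torn down) and wake up poll/select and fasync waiters.
 */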
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = cq->uobject;

	entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
	entry->counter = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);

	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
			     __u64 element, __u64 event,
			     struct list_head *obj_list, u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!async_file)
		return;

	spin_lock_irqsave(&async_file->ev_queue.lock, flags);
	if (async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved = 0;
	entry->counter = counter;

	list_add_tail(&entry->list, &async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);

	wake_up_interruptible(&async_file->ev_queue.poll_wait);
	kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}

static void uverbs_uobj_event(struct ib_uevent_object *eobj,
			      struct ib_event *event)
{
	ib_uverbs_async_handler(eobj->event_file,
				eobj->uobject.user_handle, event->event,
				&eobj->event_list, &eobj->events_reported);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
}

static void ib_uverbs_event_handler(struct ib_event_handler *handler,
				    struct ib_event *event)
{
	ib_uverbs_async_handler(
		container_of(handler, struct ib_uverbs_async_event_file,
			     event_handler),
		event->element.port_num, event->event, NULL, NULL);
}

void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed = 0;
	ev_queue->async_queue = NULL;
}

void ib_uverbs_init_async_event_file(
	struct ib_uverbs_async_event_file *async_file)
{
	struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
	struct ib_device *ib_dev = async_file->uobj.context->device;

	ib_uverbs_init_event_queue(&async_file->ev_queue);

	/* The first async_event_file becomes the default one for the file. */
	mutex_lock(&uverbs_file->ucontext_lock);
	if (!uverbs_file->default_async_file) {
		/* Pairs with the put in ib_uverbs_release_file */
		uverbs_uobject_get(&async_file->uobj);
		smp_store_release(&uverbs_file->default_async_file, async_file);
	}
	mutex_unlock(&uverbs_file->ucontext_lock);

	INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&async_file->event_handler);
}
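
/*
 * Validate the lengths in a write() command header (and, for extended
 * commands, the ex header) against what the registered write method
 * expects, before the handler is dispatched.
 */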
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
			  struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
			  const struct uverbs_api_write_method *method_elm)
{
	if (method_elm->is_ex) {
		count -= sizeof(*hdr) + sizeof(*ex_hdr);

		if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
			return -EINVAL;

		if (hdr->in_words * 8 < method_elm->req_size)
			return -ENOSPC;

		if (ex_hdr->cmd_hdr_reserved)
			return -EINVAL;

		if (ex_hdr->response) {
			if (!hdr->out_words && !ex_hdr->provider_out_words)
				return -EINVAL;

			if (hdr->out_words * 8 < method_elm->resp_size)
				return -ENOSPC;

			if (!access_ok(u64_to_user_ptr(ex_hdr->response),
				       (hdr->out_words + ex_hdr->provider_out_words) * 8))
				return -EFAULT;
		} else {
			if (hdr->out_words || ex_hdr->provider_out_words)
				return -EINVAL;
		}

		return 0;
	}

	/* not extended command */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	if (count < method_elm->req_size + sizeof(hdr)) {
		/*
		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
		 * with a 16 byte write instead of 24. Old kernels didn't
		 * check the size so they allowed this. Now that the size is
		 * checked provide a compatibility work around to not break
		 * those userspaces.
		 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;
			return 0;
		}
		return -ENOSPC;
	}

	if (hdr->out_words * 4 < method_elm->resp_size)
		return -ENOSPC;

	return 0;
}

static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	const struct uverbs_api_write_method *method_elm;
	struct uverbs_api *uapi = file->device->uapi;
	struct ib_uverbs_ex_cmd_hdr ex_hdr;
	struct ib_uverbs_cmd_hdr hdr;
	struct uverbs_attr_bundle bundle;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	if (method_elm->is_ex) {
		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
			return -EINVAL;
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

	buf += sizeof(hdr);

	memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
	bundle.ufile = file;
	bundle.context = NULL; /* only valid if bundle has uobject */
	bundle.uobject = NULL;
	if (!method_elm->is_ex) {
		size_t in_len = hdr.in_words * 4 - sizeof(hdr);
		size_t out_len = hdr.out_words * 4;
		u64 response = 0;

		if (method_elm->has_udata) {
			bundle.driver_udata.inlen =
				in_len - method_elm->req_size;
			in_len = method_elm->req_size;
			if (bundle.driver_udata.inlen)
				bundle.driver_udata.inbuf = buf + in_len;
			else
				bundle.driver_udata.inbuf = NULL;
		} else {
			memset(&bundle.driver_udata, 0,
			       sizeof(bundle.driver_udata));
		}

		if (method_elm->has_resp) {
			/*
			 * The macros check that if has_resp is set
			 * then the command request structure starts
			 * with a '__aligned u64 response' member.
			 */
			ret = get_user(response, (const u64 __user *)buf);
			if (ret)
				goto out_unlock;

			if (method_elm->has_udata) {
				bundle.driver_udata.outlen =
					out_len - method_elm->resp_size;
				out_len = method_elm->resp_size;
				if (bundle.driver_udata.outlen)
					bundle.driver_udata.outbuf =
						u64_to_user_ptr(response +
								out_len);
				else
					bundle.driver_udata.outbuf = NULL;
			}
		} else {
			bundle.driver_udata.outlen = 0;
			bundle.driver_udata.outbuf = NULL;
		}

		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);
	}

	ret = method_elm->handler(&bundle);
	if (bundle.uobject)
		uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true,
				       !ret, &bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return (ret) ? : count;
}
static const struct vm_operations_struct rdma_umap_ops;

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_ucontext *ucontext;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto out;
	}
	vma->vm_ops = &rdma_umap_ops;
	ret = ucontext->device->ops.mmap(ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}

/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *opriv = vma->vm_private_data;
	struct rdma_umap_priv *priv;

	if (!opriv)
		return;

	/* We are racing with disassociation */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		goto out_zap;
	/*
	 * Disassociation already completed, the VMA should already be zapped.
	 */
	if (!ufile->ucontext)
		goto out_unlock;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma, opriv->entry);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void rdma_umap_close(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *priv = vma->vm_private_data;

	if (!priv)
		return;

	/*
	 * The vma holds a reference on the struct file that created it, which
	 * in turn means that the ib_uverbs_file is guaranteed to exist at
	 * this point.
	 */
	mutex_lock(&ufile->umap_lock);
	if (priv->entry)
		rdma_user_mmap_entry_put(priv->entry);
	list_del(&priv->list);
	mutex_unlock(&ufile->umap_lock);
	kfree(priv);
}

/*
 * Once the zap_vma_ptes has been called touches to the VMA will come here and
 * we return a dummy writable zero page for all the pfns.
 */
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
{
	struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
	struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
	vm_fault_t ret = 0;

	if (!priv)
		return VM_FAULT_SIGBUS;

	/* Read only pages can just use the system zero page. */
	if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
		vmf->page = ZERO_PAGE(vmf->address);
		get_page(vmf->page);
		return 0;
	}

	mutex_lock(&ufile->umap_lock);
	if (!ufile->disassociate_page)
		ufile->disassociate_page =
			alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);

	if (ufile->disassociate_page) {
		/*
		 * This VMA is forced to always be shared so this doesn't have
		 * to worry about COW.
		 */
		vmf->page = ufile->disassociate_page;
		get_page(vmf->page);
	} else {
		ret = VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ufile->umap_lock);

	return ret;
}

static const struct vm_operations_struct rdma_umap_ops = {
	.open = rdma_umap_open,
	.close = rdma_umap_close,
	.fault = rdma_umap_fault,
};
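
/*
 * Zap every userspace mapping created through this ufile so the process can
 * no longer touch device pages after disassociation. Faults on the zapped
 * ranges are then serviced by rdma_umap_fault() with a dummy zero page.
 */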
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
	struct rdma_umap_priv *priv, *next_priv;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);

	while (1) {
		struct mm_struct *mm = NULL;

		/* Get an arbitrary mm pointer that hasn't been cleaned yet */
		mutex_lock(&ufile->umap_lock);
		while (!list_empty(&ufile->umaps)) {
			int ret;

			priv = list_first_entry(&ufile->umaps,
						struct rdma_umap_priv, list);
			mm = priv->vma->vm_mm;
			ret = mmget_not_zero(mm);
			if (!ret) {
				list_del_init(&priv->list);
				if (priv->entry) {
					rdma_user_mmap_entry_put(priv->entry);
					priv->entry = NULL;
				}
				mm = NULL;
				continue;
			}
			break;
		}
		mutex_unlock(&ufile->umap_lock);
		if (!mm)
			return;

		/*
		 * The umap_lock is nested under mmap_lock since it used within
		 * the vma_ops callbacks, so we have to clean the list one mm
		 * at a time to get the lock ordering right. Typically there
		 * will only be one mm, so no big deal.
		 */
		mmap_read_lock(mm);
		mutex_lock(&ufile->umap_lock);
		list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
					  list) {
			struct vm_area_struct *vma = priv->vma;

			if (vma->vm_mm != mm)
				continue;
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);

			if (priv->entry) {
				rdma_user_mmap_entry_put(priv->entry);
				priv->entry = NULL;
			}
		}
		mutex_unlock(&ufile->umap_lock);
		mmap_read_unlock(mm);
		mmput(mm);
	}
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately run -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
		ret = -EPERM;
		goto err;
	}

	/* In case IB device supports disassociate ucontext, there is no hard
	 * dependency between uverbs device and its low level device.
	 */
	module_dependent = !(ib_dev->ops.disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->ops.owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device = dev;
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	setup_ufile_idr_uobject(file);

	return stream_open(inode, filp);

err_module:
	module_put(ib_dev->ops.owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
	return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}

static const struct file_operations uverbs_fops = {
	.owner   = THIS_MODULE,
	.write   = ib_uverbs_write,
	.open    = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek  = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
	.owner   = THIS_MODULE,
	.write   = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open    = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek  = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
				 struct ib_client_nl_info *res)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int ret;

	if (res->port != -1)
		return -EINVAL;

	res->abi = ibdev->ops.uverbs_abi_ver;
	res->cdev = &uverbs_dev->dev;

	/*
	 * To support DRIVER_ID binding in userspace some of the driver need
	 * upgrading to expose their PCI dependent revision information
	 * through get_context instead of relying on modalias matching. When
	 * the drivers are fixed they can drop this flag.
	 */
	if (!ibdev->ops.uverbs_no_driver_id_binding) {
		ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
				  ibdev->ops.driver_id);
		if (ret)
			return ret;
	}
	return 0;
}

static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.no_kverbs_req = true,
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one,
	.get_nl_info = ib_uverbs_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("uverbs");

static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev =
		container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev =
		container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
	&dev_attr_abi_version.attr,
	&dev_attr_ibdev.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));

static int ib_uverbs_create_uapi(struct ib_device *device,
				 struct ib_uverbs_device *uverbs_dev)
{
	struct uverbs_api *uapi;

	uapi = uverbs_alloc_api(device);
	if (IS_ERR(uapi))
		return PTR_ERR(uapi);

	uverbs_dev->uapi = uapi;
	return 0;
}
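
/*
 * Client ->add callback: allocate the ib_uverbs_device for a newly
 * registered ib_device, pick a minor number and expose the uverbsN char
 * device to userspace.
 */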
static int ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->ops.alloc_ucontext)
		return -EOPNOTSUPP;

	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return -ENOMEM;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return -ENOMEM;
	}

	device_initialize(&uverbs_dev->dev);
	uverbs_dev->dev.class = uverbs_class;
	uverbs_dev->dev.parent = device->dev.parent;
	uverbs_dev->dev.release = ib_uverbs_release_dev;
	uverbs_dev->groups[0] = &dev_attr_group;
	uverbs_dev->dev.groups = uverbs_dev->groups;
	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
			       GFP_KERNEL);
	if (devnum < 0) {
		ret = -ENOMEM;
		goto err;
	}
	uverbs_dev->devnum = devnum;
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	else
		base = IB_UVERBS_BASE_DEV + devnum;

	ret = ib_uverbs_create_uapi(device, uverbs_dev);
	if (ret)
		goto err_uapi;

	uverbs_dev->dev.devt = base;
	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

	cdev_init(&uverbs_dev->cdev,
		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
	uverbs_dev->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
	if (ret)
		goto err_uapi;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);
	return 0;

err_uapi:
	ida_free(&uverbs_ida, devnum);
err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	put_device(&uverbs_dev->dev);
	return ret;
}

static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;

	/* Pending running commands to terminate */
	uverbs_disassociate_api_pre(uverbs_dev);

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		list_del_init(&file->list);
		kref_get(&file->ref);

		/* We must release the mutex before going ahead and calling
		 * uverbs_cleanup_ufile, as it might end up indirectly calling
		 * uverbs_close, for example due to freeing the resources (e.g
		 * mmput).
		 */
		mutex_unlock(&uverbs_dev->lists_mutex);

		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
		kref_put(&file->ref, ib_uverbs_release_file);

		mutex_lock(&uverbs_dev->lists_mutex);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);

	uverbs_disassociate_api(uverbs_dev->uapi);
}

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);

	if (device->ops.disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see a EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
}

static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
	mmu_notifier_synchronize();
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);