uverbs_ioctl.c

/*
 * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/rdma_user_ioctl.h>
#include <rdma/uverbs_ioctl.h>
#include "rdma_core.h"
#include "uverbs.h"

struct bundle_alloc_head {
	struct bundle_alloc_head *next;
	u8 data[];
};

struct bundle_priv {
	/* Must be first */
	struct bundle_alloc_head alloc_head;
	struct bundle_alloc_head *allocated_mem;
	size_t internal_avail;
	size_t internal_used;

	struct radix_tree_root *radix;
	const struct uverbs_api_ioctl_method *method_elm;
	void __rcu **radix_slots;
	unsigned long radix_slots_len;
	u32 method_key;

	struct ib_uverbs_attr __user *user_attrs;
	struct ib_uverbs_attr *uattrs;

	DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
	DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
	DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN);

	/*
	 * Must be last. bundle ends in a flex array which overlaps
	 * internal_buffer.
	 */
	struct uverbs_attr_bundle bundle;
	u64 internal_buffer[32];
};

/*
 * Each method has an absolute minimum amount of memory it needs to allocate;
 * precompute that amount and determine if the onstack memory can be used or
 * if allocation is needed.
 */
void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
			      unsigned int num_attrs)
{
	struct bundle_priv *pbundle;
	size_t bundle_size =
		offsetof(struct bundle_priv, internal_buffer) +
		sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
		sizeof(*pbundle->uattrs) * num_attrs;

	method_elm->use_stack = bundle_size <= sizeof(*pbundle);
	method_elm->bundle_size =
		ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));

	/* Do not want order-2 allocations for this. */
	WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
}

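/*
 * Illustrative note (not part of the original file): use_stack selects
 * between the on-stack bundle_priv in ib_uverbs_cmd_verbs() (chosen only
 * when the attrs and uattrs arrays are known to fit in internal_buffer)
 * and a single kmalloc() of bundle_size, which reserves an extra 256
 * bytes of slack for uverbs_alloc() calls made by the handler; anything
 * beyond that falls back to kvmalloc() inside _uverbs_alloc().
 */
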
/**
 * uverbs_alloc() - Quickly allocate memory for use with a bundle
 * @bundle: The bundle
 * @size: Number of bytes to allocate
 * @flags: Allocator flags
 *
 * The bundle allocator is intended for allocations that are connected with
 * processing the system call related to the bundle. The allocated memory is
 * always freed once the system call completes, and cannot be freed any other
 * way.
 *
 * This tries to use a small pool of pre-allocated memory for performance.
 */
__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
			     gfp_t flags)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	size_t new_used;
	void *res;

	if (check_add_overflow(size, pbundle->internal_used, &new_used))
		return ERR_PTR(-EOVERFLOW);

	if (new_used > pbundle->internal_avail) {
		struct bundle_alloc_head *buf;

		buf = kvmalloc(struct_size(buf, data, size), flags);
		if (!buf)
			return ERR_PTR(-ENOMEM);
		buf->next = pbundle->allocated_mem;
		pbundle->allocated_mem = buf;
		return buf->data;
	}

	res = (void *)pbundle->internal_buffer + pbundle->internal_used;
	pbundle->internal_used =
		ALIGN(new_used, sizeof(*pbundle->internal_buffer));
	if (want_init_on_alloc(flags))
		memset(res, 0, size);
	return res;
}
EXPORT_SYMBOL(_uverbs_alloc);

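/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * method handler using the bundle allocator through the uverbs_zalloc()
 * wrapper from <rdma/uverbs_ioctl.h>. The memory is released in
 * bundle_destroy() when the ioctl completes, never by the handler:
 *
 *	static int example_handler(struct uverbs_attr_bundle *attrs)
 *	{
 *		u32 *scratch = uverbs_zalloc(attrs, 64 * sizeof(*scratch));
 *
 *		if (IS_ERR(scratch))
 *			return PTR_ERR(scratch);
 *
 *		return 0;
 *	}
 *
 * There is no kfree()/kvfree() in the handler: the bundle owns the memory.
 */
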
static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
				   u16 len)
{
	if (uattr->len > sizeof_field(struct ib_uverbs_attr, data))
		return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
					    uattr->len - len);

	return !memchr_inv((const void *)&uattr->data + len,
			   0, uattr->len - len);
}

static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
			     const struct uverbs_attr *attr)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	u16 flags;

	flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
		UVERBS_ATTR_F_VALID_OUTPUT;
	if (put_user(flags,
		     &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
		return -EFAULT;
	return 0;
}

static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
				     const struct uverbs_api_attr *attr_uapi,
				     struct uverbs_objs_arr_attr *attr,
				     struct ib_uverbs_attr *uattr,
				     u32 attr_bkey)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	size_t array_len;
	u32 *idr_vals;
	int ret = 0;
	size_t i;

	if (uattr->attr_data.reserved)
		return -EINVAL;

	if (uattr->len % sizeof(u32))
		return -EINVAL;

	array_len = uattr->len / sizeof(u32);
	if (array_len < spec->u2.objs_arr.min_len ||
	    array_len > spec->u2.objs_arr.max_len)
		return -EINVAL;

	attr->uobjects =
		uverbs_alloc(&pbundle->bundle,
			     array_size(array_len, sizeof(*attr->uobjects)));
	if (IS_ERR(attr->uobjects))
		return PTR_ERR(attr->uobjects);

	/*
	 * Since an idr is 4B and *uobjects is >= 4B, we can use attr->uobjects
	 * to store the idr array and avoid an additional memory allocation.
	 * The idr array is offset to the end of the uobjects array so we will
	 * be able to read an idr and replace it with a pointer.
	 */
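	/*
	 * Added illustration (not part of the original comment), assuming
	 * 64-bit pointers and array_len == 3; one character per byte:
	 *
	 *	uobjects:  [ ptr0 ][ ptr1 ][ ptr2 ]
	 *	idr_vals:              [i0][i1][i2]
	 *
	 * idr_vals[i] is read before uobjects[i] is written in each loop
	 * iteration, and writing uobjects[i] can only overwrite idr values
	 * at index <= i, which have already been consumed.
	 */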
	idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;

	if (uattr->len > sizeof(uattr->data)) {
		ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
				     uattr->len);
		if (ret)
			return -EFAULT;
	} else {
		memcpy(idr_vals, &uattr->data, uattr->len);
	}

	for (i = 0; i != array_len; i++) {
		attr->uobjects[i] = uverbs_get_uobject_from_file(
			spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access,
			idr_vals[i], &pbundle->bundle);
		if (IS_ERR(attr->uobjects[i])) {
			ret = PTR_ERR(attr->uobjects[i]);
			break;
		}
	}

	attr->len = i;
	__set_bit(attr_bkey, pbundle->spec_finalize);
	return ret;
}

static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
				   struct uverbs_objs_arr_attr *attr,
				   bool commit,
				   struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	size_t i;

	for (i = 0; i != attr->len; i++)
		uverbs_finalize_object(attr->uobjects[i],
				       spec->u2.objs_arr.access, false, commit,
				       attrs);
}

static int uverbs_process_attr(struct bundle_priv *pbundle,
			       const struct uverbs_api_attr *attr_uapi,
			       struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
	const struct uverbs_attr_spec *val_spec = spec;
	struct uverbs_obj_attr *o_attr;

	switch (spec->type) {
	case UVERBS_ATTR_TYPE_ENUM_IN:
		if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
			return -EOPNOTSUPP;

		if (uattr->attr_data.enum_data.reserved)
			return -EINVAL;

		val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];

		/* Currently we only support PTR_IN based enums */
		if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
			return -EOPNOTSUPP;

		e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
		fallthrough;
	case UVERBS_ATTR_TYPE_PTR_IN:
		/* Ensure that any data provided by userspace beyond the known
		 * struct is zero. Userspace that knows how to use some future
		 * longer struct will fail here if used with an old kernel and
		 * non-zero content, making ABI compat/discovery simpler.
		 */
		if (uattr->len > val_spec->u.ptr.len &&
		    val_spec->zero_trailing &&
		    !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
			return -EOPNOTSUPP;

		fallthrough;
	case UVERBS_ATTR_TYPE_PTR_OUT:
		if (uattr->len < val_spec->u.ptr.min_len ||
		    (!val_spec->zero_trailing &&
		     uattr->len > val_spec->u.ptr.len))
			return -EINVAL;

		if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
		    uattr->attr_data.reserved)
			return -EINVAL;

		e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
		e->ptr_attr.len = uattr->len;

		if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
			void *p;

			p = uverbs_alloc(&pbundle->bundle, uattr->len);
			if (IS_ERR(p))
				return PTR_ERR(p);

			e->ptr_attr.ptr = p;

			if (copy_from_user(p, u64_to_user_ptr(uattr->data),
					   uattr->len))
				return -EFAULT;
		} else {
			e->ptr_attr.data = uattr->data;
		}
		break;

	case UVERBS_ATTR_TYPE_IDR:
	case UVERBS_ATTR_TYPE_FD:
		if (uattr->attr_data.reserved)
			return -EINVAL;

		if (uattr->len != 0)
			return -EINVAL;

		o_attr = &e->obj_attr;
		o_attr->attr_elm = attr_uapi;

		/*
		 * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
		 * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
		 * here without caring about truncation as we know that the
		 * IDR implementation today rejects negative IDs
		 */
		o_attr->uobject = uverbs_get_uobject_from_file(
			spec->u.obj.obj_type, spec->u.obj.access,
			uattr->data_s64, &pbundle->bundle);
		if (IS_ERR(o_attr->uobject))
			return PTR_ERR(o_attr->uobject);
		__set_bit(attr_bkey, pbundle->uobj_finalize);

		if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
			unsigned int uattr_idx = uattr - pbundle->uattrs;
			s64 id = o_attr->uobject->id;

			/* Copy the allocated id to the user-space */
			if (put_user(id, &pbundle->user_attrs[uattr_idx].data))
				return -EFAULT;
		}
		break;

	case UVERBS_ATTR_TYPE_IDRS_ARRAY:
		return uverbs_process_idrs_array(pbundle, attr_uapi,
						 &e->objs_arr_attr, uattr,
						 attr_bkey);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

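/*
 * Illustrative sketch (not part of the original file): after the
 * attributes are processed above, a hypothetical handler reads them
 * through the accessors in <rdma/uverbs_ioctl.h>, e.g.
 *
 *	struct ib_uobject *uobj =
 *		uverbs_attr_get_uobject(attrs, EXAMPLE_ATTR_HANDLE);
 *	struct example_cmd cmd;
 *	int ret;
 *
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	ret = uverbs_copy_from(&cmd, attrs, EXAMPLE_ATTR_CMD);
 *	if (ret)
 *		return ret;
 *
 * EXAMPLE_ATTR_HANDLE, EXAMPLE_ATTR_CMD and struct example_cmd stand in
 * for a real method's attribute IDs and wire structure.
 */
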
/*
 * We search the radix tree with the method prefix and now we want to fast
 * search the suffix bits to get a particular attribute pointer. It is not
 * totally clear to me if this breaks the radix tree encapsulation or not,
 * but it uses the iter data to determine if the method iter points at the
 * same chunk that will store the attribute; if so it just derefs it
 * directly. By construction in most kernel configs the method and attrs
 * will all fit in a single radix chunk, so in most cases this will have
 * no search. Other cases fall back to a full search.
 */
static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle,
					     u32 attr_key)
{
	void __rcu **slot;

	if (likely(attr_key < pbundle->radix_slots_len)) {
		void *entry;

		slot = pbundle->radix_slots + attr_key;
		entry = rcu_dereference_raw(*slot);
		if (likely(!radix_tree_is_internal_node(entry) && entry))
			return slot;
	}

	return radix_tree_lookup_slot(pbundle->radix,
				      pbundle->method_key | attr_key);
}

static int uverbs_set_attr(struct bundle_priv *pbundle,
			   struct ib_uverbs_attr *uattr)
{
	u32 attr_key = uapi_key_attr(uattr->attr_id);
	u32 attr_bkey = uapi_bkey_attr(attr_key);
	const struct uverbs_api_attr *attr;
	void __rcu **slot;
	int ret;

	slot = uapi_get_attr_for_method(pbundle, attr_key);
	if (!slot) {
		/*
		 * Kernel does not support the attribute but user-space says it
		 * is mandatory
		 */
		if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
			return -EPROTONOSUPPORT;
		return 0;
	}
	attr = rcu_dereference_protected(*slot, true);

	/* Reject duplicate attributes from user-space */
	if (test_bit(attr_bkey, pbundle->bundle.attr_present))
		return -EINVAL;

	ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey);
	if (ret)
		return ret;

	__set_bit(attr_bkey, pbundle->bundle.attr_present);
	return 0;
}

static int ib_uverbs_run_method(struct bundle_priv *pbundle,
				unsigned int num_attrs)
{
	int (*handler)(struct uverbs_attr_bundle *attrs);
	size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
	unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
	unsigned int i;
	int ret;

	/* See uverbs_disassociate_api() */
	handler = srcu_dereference(
		pbundle->method_elm->handler,
		&pbundle->bundle.ufile->device->disassociate_srcu);
	if (!handler)
		return -EIO;

	pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
	if (IS_ERR(pbundle->uattrs))
		return PTR_ERR(pbundle->uattrs);
	if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
		return -EFAULT;

	for (i = 0; i != num_attrs; i++) {
		ret = uverbs_set_attr(pbundle, &pbundle->uattrs[i]);
		if (unlikely(ret))
			return ret;
	}

	/* User space did not provide all the mandatory attributes */
	if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory,
				    pbundle->bundle.attr_present,
				    pbundle->method_elm->key_bitmap_len)))
		return -EINVAL;

	if (pbundle->method_elm->has_udata)
		uverbs_fill_udata(&pbundle->bundle,
				  &pbundle->bundle.driver_udata,
				  UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT);
	else
		pbundle->bundle.driver_udata = (struct ib_udata){};

	if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
		struct uverbs_obj_attr *destroy_attr =
			&pbundle->bundle.attrs[destroy_bkey].obj_attr;

		ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle);
		if (ret)
			return ret;
		__clear_bit(destroy_bkey, pbundle->uobj_finalize);

		ret = handler(&pbundle->bundle);
		uobj_put_destroy(destroy_attr->uobject);
	} else {
		ret = handler(&pbundle->bundle);
	}

	/*
	 * Until the drivers are revised to use the bundle directly we have to
	 * assume that the driver wrote to its UHW_OUT and flag userspace
	 * appropriately.
	 */
	if (!ret && pbundle->method_elm->has_udata) {
		const struct uverbs_attr *attr =
			uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);

		if (!IS_ERR(attr))
			ret = uverbs_set_output(&pbundle->bundle, attr);
	}

	/*
	 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
	 * not invoke the method because the request is not supported. No
	 * other cases should return this code.
	 */
	if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT))
		return -EINVAL;

	return ret;
}

static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
{
	unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
	struct bundle_alloc_head *memblock;
	unsigned int i;

	/* fast path for simple uobjects */
	i = -1;
	while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
				  i + 1)) < key_bitmap_len) {
		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];

		uverbs_finalize_object(
			attr->obj_attr.uobject,
			attr->obj_attr.attr_elm->spec.u.obj.access,
			test_bit(i, pbundle->uobj_hw_obj_valid),
			commit,
			&pbundle->bundle);
	}

	i = -1;
	while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
				  i + 1)) < key_bitmap_len) {
		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
		const struct uverbs_api_attr *attr_uapi;
		void __rcu **slot;

		slot = uapi_get_attr_for_method(
			pbundle,
			pbundle->method_key | uapi_bkey_to_key_attr(i));
		if (WARN_ON(!slot))
			continue;

		attr_uapi = rcu_dereference_protected(*slot, true);

		if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
			uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
					       commit, &pbundle->bundle);
		}
	}

	for (memblock = pbundle->allocated_mem; memblock;) {
		struct bundle_alloc_head *tmp = memblock;

		memblock = memblock->next;
		kvfree(tmp);
	}
}

static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
			       struct ib_uverbs_ioctl_hdr *hdr,
			       struct ib_uverbs_attr __user *user_attrs)
{
	const struct uverbs_api_ioctl_method *method_elm;
	struct uverbs_api *uapi = ufile->device->uapi;
	struct radix_tree_iter attrs_iter;
	struct bundle_priv *pbundle;
	struct bundle_priv onstack;
	void __rcu **slot;
	int ret;

	if (unlikely(hdr->driver_id != uapi->driver_id))
		return -EINVAL;

	slot = radix_tree_iter_lookup(
		&uapi->radix, &attrs_iter,
		uapi_key_obj(hdr->object_id) |
			uapi_key_ioctl_method(hdr->method_id));
	if (unlikely(!slot))
		return -EPROTONOSUPPORT;
	method_elm = rcu_dereference_protected(*slot, true);

	if (!method_elm->use_stack) {
		pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
		if (!pbundle)
			return -ENOMEM;
		pbundle->internal_avail =
			method_elm->bundle_size -
			offsetof(struct bundle_priv, internal_buffer);
		pbundle->alloc_head.next = NULL;
		pbundle->allocated_mem = &pbundle->alloc_head;
	} else {
		pbundle = &onstack;
		pbundle->internal_avail = sizeof(pbundle->internal_buffer);
		pbundle->allocated_mem = NULL;
	}

	pbundle->method_elm = method_elm;
	pbundle->method_key = attrs_iter.index;
	pbundle->bundle.ufile = ufile;
	pbundle->bundle.context = NULL; /* only valid if bundle has uobject */
	pbundle->radix = &uapi->radix;
	pbundle->radix_slots = slot;
	pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
	pbundle->user_attrs = user_attrs;

	/* Space for the pbundle->bundle.attrs flex array */
	pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
				       sizeof(*pbundle->bundle.attrs),
				       sizeof(*pbundle->internal_buffer));
	memset(pbundle->bundle.attr_present, 0,
	       sizeof(pbundle->bundle.attr_present));
	memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
	memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
	memset(pbundle->uobj_hw_obj_valid, 0,
	       sizeof(pbundle->uobj_hw_obj_valid));

	ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
	bundle_destroy(pbundle, ret == 0);
	return ret;
}

long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_uverbs_ioctl_hdr __user *user_hdr =
		(struct ib_uverbs_ioctl_hdr __user *)arg;
	struct ib_uverbs_ioctl_hdr hdr;
	int srcu_key;
	int err;

	if (unlikely(cmd != RDMA_VERBS_IOCTL))
		return -ENOIOCTLCMD;

	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
	if (err)
		return -EFAULT;

	if (hdr.length > PAGE_SIZE ||
	    hdr.length != struct_size(&hdr, attrs, hdr.num_attrs))
		return -EINVAL;

	if (hdr.reserved1 || hdr.reserved2)
		return -EPROTONOSUPPORT;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return err;
}

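/*
 * Illustrative note (not part of the original file): userspace invokes
 * this entry point with a single struct ib_uverbs_ioctl_hdr that is
 * immediately followed by hdr.num_attrs struct ib_uverbs_attr entries:
 *
 *	ioctl(cmd_fd, RDMA_VERBS_IOCTL, hdr);
 *
 * (cmd_fd being the open uverbs device fd), which is why hdr.length
 * above must equal struct_size(&hdr, attrs, hdr.num_attrs).
 */
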
int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
		       size_t idx, u64 allowed_bits)
{
	const struct uverbs_attr *attr;
	u64 flags;

	attr = uverbs_attr_get(attrs_bundle, idx);
	/* Missing attribute means 0 flags */
	if (IS_ERR(attr)) {
		*to = 0;
		return 0;
	}

	/*
	 * New userspace code should use 8 bytes to pass flags, but we
	 * transparently support old userspaces that were using 4 bytes as
	 * well.
	 */
	if (attr->ptr_attr.len == 8)
		flags = attr->ptr_attr.data;
	else if (attr->ptr_attr.len == 4)
		flags = *(u32 *)&attr->ptr_attr.data;
	else
		return -EINVAL;

	if (flags & ~allowed_bits)
		return -EINVAL;

	*to = flags;
	return 0;
}
EXPORT_SYMBOL(uverbs_get_flags64);

int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
		       size_t idx, u64 allowed_bits)
{
	u64 flags;
	int ret;

	ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits);
	if (ret)
		return ret;

	if (flags > U32_MAX)
		return -EINVAL;
	*to = flags;

	return 0;
}
EXPORT_SYMBOL(uverbs_get_flags32);

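/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * handler validating a flags attribute against the bits it understands:
 *
 *	u32 flags;
 *	int ret;
 *
 *	ret = uverbs_get_flags32(&flags, attrs, EXAMPLE_ATTR_FLAGS,
 *				 EXAMPLE_FLAG_A | EXAMPLE_FLAG_B);
 *	if (ret)
 *		return ret;
 *
 * A missing attribute yields flags == 0; any bit outside the allowed
 * mask makes the call fail with -EINVAL.
 */
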
/*
 * Fill an ib_udata struct (core or uhw) using the given attribute IDs.
 * This is primarily used to convert the UVERBS_ATTR_UHW() into the
 * ib_udata format used by the drivers.
 */
void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
		       struct ib_udata *udata, unsigned int attr_in,
		       unsigned int attr_out)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	const struct uverbs_attr *in =
		uverbs_attr_get(&pbundle->bundle, attr_in);
	const struct uverbs_attr *out =
		uverbs_attr_get(&pbundle->bundle, attr_out);

	if (!IS_ERR(in)) {
		udata->inlen = in->ptr_attr.len;
		if (uverbs_attr_ptr_is_inline(in))
			udata->inbuf =
				&pbundle->user_attrs[in->ptr_attr.uattr_idx]
					 .data;
		else
			udata->inbuf = u64_to_user_ptr(in->ptr_attr.data);
	} else {
		udata->inbuf = NULL;
		udata->inlen = 0;
	}

	if (!IS_ERR(out)) {
		udata->outbuf = u64_to_user_ptr(out->ptr_attr.data);
		udata->outlen = out->ptr_attr.len;
	} else {
		udata->outbuf = NULL;
		udata->outlen = 0;
	}
}

int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
		   const void *from, size_t size)
{
	const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
	size_t min_size;

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	min_size = min_t(size_t, attr->ptr_attr.len, size);
	if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
		return -EFAULT;

	return uverbs_set_output(bundle, attr);
}
EXPORT_SYMBOL(uverbs_copy_to);

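/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * handler returning a response struct to userspace:
 *
 *	struct example_resp resp = { .value = 42 };
 *
 *	return uverbs_copy_to(attrs, EXAMPLE_ATTR_RESP, &resp, sizeof(resp));
 *
 * Besides copying min(attr len, size) bytes, this also sets
 * UVERBS_ATTR_F_VALID_OUTPUT on the attribute so userspace can tell the
 * buffer was written.
 */
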
/*
 * This is only used if the caller has directly used copy_to_user to write the
 * data. It signals to user space that the buffer is filled in.
 */
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
{
	const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	return uverbs_set_output(bundle, attr);
}

int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
		      size_t idx, s64 lower_bound, u64 upper_bound,
		      s64 *def_val)
{
	const struct uverbs_attr *attr;

	attr = uverbs_attr_get(attrs_bundle, idx);
	if (IS_ERR(attr)) {
		if ((PTR_ERR(attr) != -ENOENT) || !def_val)
			return PTR_ERR(attr);

		*to = *def_val;
	} else {
		*to = attr->ptr_attr.data;
	}

	if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(_uverbs_get_const);

int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
				  size_t idx, const void *from, size_t size)
{
	const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	if (size < attr->ptr_attr.len) {
		if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
			       attr->ptr_attr.len - size))
			return -EFAULT;
	}

	return uverbs_copy_to(bundle, idx, from, size);
}
EXPORT_SYMBOL(uverbs_copy_to_struct_or_zero);

/* Once called an abort will call through to the type's destroy_hw() */
void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
				 u16 idx)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);

	__set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
		  pbundle->uobj_hw_obj_valid);
}
EXPORT_SYMBOL(uverbs_finalize_uobj_create);