device.c

  1. /*
  2. * Copyright (c) 2004 Topspin Communications. All rights reserved.
  3. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/module.h>
  34. #include <linux/string.h>
  35. #include <linux/errno.h>
  36. #include <linux/kernel.h>
  37. #include <linux/slab.h>
  38. #include <linux/init.h>
  39. #include <linux/netdevice.h>
  40. #include <net/net_namespace.h>
  41. #include <linux/security.h>
  42. #include <linux/notifier.h>
  43. #include <linux/hashtable.h>
  44. #include <rdma/rdma_netlink.h>
  45. #include <rdma/ib_addr.h>
  46. #include <rdma/ib_cache.h>
  47. #include <rdma/rdma_counter.h>
  48. #include "core_priv.h"
  49. #include "restrack.h"
  50. MODULE_AUTHOR("Roland Dreier");
  51. MODULE_DESCRIPTION("core kernel InfiniBand API");
  52. MODULE_LICENSE("Dual BSD/GPL");
  53. struct workqueue_struct *ib_comp_wq;
  54. struct workqueue_struct *ib_comp_unbound_wq;
  55. struct workqueue_struct *ib_wq;
  56. EXPORT_SYMBOL_GPL(ib_wq);
  57. /*
  58. * Each of the three rwsem locks (devices, clients, client_data) protects the
  59. * xarray of the same name. Specifically it allows the caller to assert that
  60. * the MARK will/will not be changing under the lock, and for devices and
  61. * clients, that the value in the xarray is still a valid pointer. Change of
  62. * the MARK is linked to the object state, so holding the lock and testing the
  63. * MARK also asserts that the contained object is in a certain state.
  64. *
  65. * This is used to build a two stage register/unregister flow where objects
  66. * can continue to be in the xarray even though they are still in progress to
  67. * register/unregister.
  68. *
  69. * The xarray itself provides additional locking, and restartable iteration,
  70. * which is also relied on.
  71. *
  72. * Locks should not be nested, with the exception of client_data, which is
  73. * allowed to nest under the read side of the other two locks.
  74. *
  75. * The devices_rwsem also protects the device name list, any change or
  76. * assignment of device name must also hold the write side to guarantee unique
  77. * names.
  78. */
  79. /*
  80. * devices contains devices that have had their names assigned. The
  81. * devices may not be registered. Users that care about the registration
  82. * status need to call ib_device_try_get() on the device to ensure it is
  83. * registered, and keep it registered, for the required duration.
  84. *
  85. */
  86. static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
  87. static DECLARE_RWSEM(devices_rwsem);
  88. #define DEVICE_REGISTERED XA_MARK_1
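/*
 * Illustrative sketch (editorial addition, not part of the original file) of
 * the two-stage scheme described above: readers take devices_rwsem and test
 * the DEVICE_REGISTERED mark before trusting a pointer from the xarray, e.g.
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED)
 *		do_something(device);	// do_something() is a placeholder
 *	up_read(&devices_rwsem);
 *
 * which mirrors what ib_policy_change_task() further down in this file does.
 */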
  89. static u32 highest_client_id;
  90. #define CLIENT_REGISTERED XA_MARK_1
  91. static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
  92. static DECLARE_RWSEM(clients_rwsem);
  93. static void ib_client_put(struct ib_client *client)
  94. {
  95. if (refcount_dec_and_test(&client->uses))
  96. complete(&client->uses_zero);
  97. }
  98. /*
  99. * If client_data is registered then the corresponding client must also still
  100. * be registered.
  101. */
  102. #define CLIENT_DATA_REGISTERED XA_MARK_1
  103. unsigned int rdma_dev_net_id;
  104. /*
  105. * A list of net namespaces is maintained in an xarray. This is necessary
  106. * because we can't get the locking right using the existing net ns list. We
   107. * would require an init_net callback after the list is updated.
  108. */
  109. static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
  110. /*
  111. * rwsem to protect accessing the rdma_nets xarray entries.
  112. */
  113. static DECLARE_RWSEM(rdma_nets_rwsem);
  114. bool ib_devices_shared_netns = true;
  115. module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
  116. MODULE_PARM_DESC(netns_mode,
  117. "Share device among net namespaces; default=1 (shared)");
  118. /**
  119. * rdma_dev_access_netns() - Return whether an rdma device can be accessed
  120. * from a specified net namespace or not.
  121. * @dev: Pointer to rdma device which needs to be checked
   122. * @net: Pointer to net namespace for which access is to be checked
  123. *
  124. * When the rdma device is in shared mode, it ignores the net namespace.
  125. * When the rdma device is exclusive to a net namespace, rdma device net
  126. * namespace is checked against the specified one.
  127. */
  128. bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
  129. {
  130. return (ib_devices_shared_netns ||
  131. net_eq(read_pnet(&dev->coredev.rdma_net), net));
  132. }
  133. EXPORT_SYMBOL(rdma_dev_access_netns);
  134. /*
  135. * xarray has this behavior where it won't iterate over NULL values stored in
  136. * allocated arrays. So we need our own iterator to see all values stored in
  137. * the array. This does the same thing as xa_for_each except that it also
  138. * returns NULL valued entries if the array is allocating. Simplified to only
  139. * work on simple xarrays.
  140. */
  141. static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
  142. xa_mark_t filter)
  143. {
  144. XA_STATE(xas, xa, *indexp);
  145. void *entry;
  146. rcu_read_lock();
  147. do {
  148. entry = xas_find_marked(&xas, ULONG_MAX, filter);
  149. if (xa_is_zero(entry))
  150. break;
  151. } while (xas_retry(&xas, entry));
  152. rcu_read_unlock();
  153. if (entry) {
  154. *indexp = xas.xa_index;
  155. if (xa_is_zero(entry))
  156. return NULL;
  157. return entry;
  158. }
  159. return XA_ERROR(-ENOENT);
  160. }
  161. #define xan_for_each_marked(xa, index, entry, filter) \
  162. for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
  163. !xa_is_err(entry); \
  164. (index)++, entry = xan_find_marked(xa, &(index), filter))
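/*
 * Illustrative use of xan_for_each_marked() (editorial addition): it has the
 * same shape as xa_for_each_marked() but also yields NULL-valued entries, e.g.
 *
 *	xan_for_each_marked(&device->client_data, index, client_data,
 *			    CLIENT_DATA_REGISTERED) {
 *		...
 *	}
 *
 * as done in ib_device_rename() further down in this file.
 */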
  165. /* RCU hash table mapping netdevice pointers to struct ib_port_data */
  166. static DEFINE_SPINLOCK(ndev_hash_lock);
  167. static DECLARE_HASHTABLE(ndev_hash, 5);
  168. static void free_netdevs(struct ib_device *ib_dev);
  169. static void ib_unregister_work(struct work_struct *work);
  170. static void __ib_unregister_device(struct ib_device *device);
  171. static int ib_security_change(struct notifier_block *nb, unsigned long event,
  172. void *lsm_data);
  173. static void ib_policy_change_task(struct work_struct *work);
  174. static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
  175. static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
  176. struct va_format *vaf)
  177. {
  178. if (ibdev && ibdev->dev.parent)
  179. dev_printk_emit(level[1] - '0',
  180. ibdev->dev.parent,
  181. "%s %s %s: %pV",
  182. dev_driver_string(ibdev->dev.parent),
  183. dev_name(ibdev->dev.parent),
  184. dev_name(&ibdev->dev),
  185. vaf);
  186. else if (ibdev)
  187. printk("%s%s: %pV",
  188. level, dev_name(&ibdev->dev), vaf);
  189. else
  190. printk("%s(NULL ib_device): %pV", level, vaf);
  191. }
  192. void ibdev_printk(const char *level, const struct ib_device *ibdev,
  193. const char *format, ...)
  194. {
  195. struct va_format vaf;
  196. va_list args;
  197. va_start(args, format);
  198. vaf.fmt = format;
  199. vaf.va = &args;
  200. __ibdev_printk(level, ibdev, &vaf);
  201. va_end(args);
  202. }
  203. EXPORT_SYMBOL(ibdev_printk);
  204. #define define_ibdev_printk_level(func, level) \
  205. void func(const struct ib_device *ibdev, const char *fmt, ...) \
  206. { \
  207. struct va_format vaf; \
  208. va_list args; \
  209. \
  210. va_start(args, fmt); \
  211. \
  212. vaf.fmt = fmt; \
  213. vaf.va = &args; \
  214. \
  215. __ibdev_printk(level, ibdev, &vaf); \
  216. \
  217. va_end(args); \
  218. } \
  219. EXPORT_SYMBOL(func);
  220. define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
  221. define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
  222. define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
  223. define_ibdev_printk_level(ibdev_err, KERN_ERR);
  224. define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
  225. define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
  226. define_ibdev_printk_level(ibdev_info, KERN_INFO);
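/*
 * Illustrative call of the wrappers generated above (editorial addition); the
 * format arguments shown are hypothetical:
 *
 *	ibdev_warn(ibdev, "couldn't query port %u: %d\n", port, ret);
 *
 * The output is prefixed with the parent device and the ib device name by
 * __ibdev_printk() above.
 */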
  227. static struct notifier_block ibdev_lsm_nb = {
  228. .notifier_call = ib_security_change,
  229. };
  230. static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
  231. struct net *net);
  232. /* Pointer to the RCU head at the start of the ib_port_data array */
  233. struct ib_port_data_rcu {
  234. struct rcu_head rcu_head;
  235. struct ib_port_data pdata[];
  236. };
  237. static void ib_device_check_mandatory(struct ib_device *device)
  238. {
  239. #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
  240. static const struct {
  241. size_t offset;
  242. char *name;
  243. } mandatory_table[] = {
  244. IB_MANDATORY_FUNC(query_device),
  245. IB_MANDATORY_FUNC(query_port),
  246. IB_MANDATORY_FUNC(alloc_pd),
  247. IB_MANDATORY_FUNC(dealloc_pd),
  248. IB_MANDATORY_FUNC(create_qp),
  249. IB_MANDATORY_FUNC(modify_qp),
  250. IB_MANDATORY_FUNC(destroy_qp),
  251. IB_MANDATORY_FUNC(post_send),
  252. IB_MANDATORY_FUNC(post_recv),
  253. IB_MANDATORY_FUNC(create_cq),
  254. IB_MANDATORY_FUNC(destroy_cq),
  255. IB_MANDATORY_FUNC(poll_cq),
  256. IB_MANDATORY_FUNC(req_notify_cq),
  257. IB_MANDATORY_FUNC(get_dma_mr),
  258. IB_MANDATORY_FUNC(dereg_mr),
  259. IB_MANDATORY_FUNC(get_port_immutable)
  260. };
  261. int i;
  262. device->kverbs_provider = true;
  263. for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
  264. if (!*(void **) ((void *) &device->ops +
  265. mandatory_table[i].offset)) {
  266. device->kverbs_provider = false;
  267. break;
  268. }
  269. }
  270. }
  271. /*
  272. * Caller must perform ib_device_put() to return the device reference count
  273. * when ib_device_get_by_index() returns valid device pointer.
  274. */
  275. struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
  276. {
  277. struct ib_device *device;
  278. down_read(&devices_rwsem);
  279. device = xa_load(&devices, index);
  280. if (device) {
  281. if (!rdma_dev_access_netns(device, net)) {
  282. device = NULL;
  283. goto out;
  284. }
  285. if (!ib_device_try_get(device))
  286. device = NULL;
  287. }
  288. out:
  289. up_read(&devices_rwsem);
  290. return device;
  291. }
  292. /**
  293. * ib_device_put - Release IB device reference
  294. * @device: device whose reference to be released
  295. *
  296. * ib_device_put() releases reference to the IB device to allow it to be
  297. * unregistered and eventually free.
  298. */
  299. void ib_device_put(struct ib_device *device)
  300. {
  301. if (refcount_dec_and_test(&device->refcount))
  302. complete(&device->unreg_completion);
  303. }
  304. EXPORT_SYMBOL(ib_device_put);
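/*
 * Illustrative get/put pairing for the two functions above (editorial
 * addition); the index value is hypothetical:
 *
 *	device = ib_device_get_by_index(net, index);
 *	if (!device)
 *		return -ENODEV;
 *	...use device...
 *	ib_device_put(device);
 */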
  305. static struct ib_device *__ib_device_get_by_name(const char *name)
  306. {
  307. struct ib_device *device;
  308. unsigned long index;
  309. xa_for_each (&devices, index, device)
  310. if (!strcmp(name, dev_name(&device->dev)))
  311. return device;
  312. return NULL;
  313. }
  314. /**
  315. * ib_device_get_by_name - Find an IB device by name
  316. * @name: The name to look for
  317. * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
  318. *
  319. * Find and hold an ib_device by its name. The caller must call
  320. * ib_device_put() on the returned pointer.
  321. */
  322. struct ib_device *ib_device_get_by_name(const char *name,
  323. enum rdma_driver_id driver_id)
  324. {
  325. struct ib_device *device;
  326. down_read(&devices_rwsem);
  327. device = __ib_device_get_by_name(name);
  328. if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
  329. device->ops.driver_id != driver_id)
  330. device = NULL;
  331. if (device) {
  332. if (!ib_device_try_get(device))
  333. device = NULL;
  334. }
  335. up_read(&devices_rwsem);
  336. return device;
  337. }
  338. EXPORT_SYMBOL(ib_device_get_by_name);
  339. static int rename_compat_devs(struct ib_device *device)
  340. {
  341. struct ib_core_device *cdev;
  342. unsigned long index;
  343. int ret = 0;
  344. mutex_lock(&device->compat_devs_mutex);
  345. xa_for_each (&device->compat_devs, index, cdev) {
  346. ret = device_rename(&cdev->dev, dev_name(&device->dev));
  347. if (ret) {
  348. dev_warn(&cdev->dev,
  349. "Fail to rename compatdev to new name %s\n",
  350. dev_name(&device->dev));
  351. break;
  352. }
  353. }
  354. mutex_unlock(&device->compat_devs_mutex);
  355. return ret;
  356. }
  357. int ib_device_rename(struct ib_device *ibdev, const char *name)
  358. {
  359. unsigned long index;
  360. void *client_data;
  361. int ret;
  362. down_write(&devices_rwsem);
  363. if (!strcmp(name, dev_name(&ibdev->dev))) {
  364. up_write(&devices_rwsem);
  365. return 0;
  366. }
  367. if (__ib_device_get_by_name(name)) {
  368. up_write(&devices_rwsem);
  369. return -EEXIST;
  370. }
  371. ret = device_rename(&ibdev->dev, name);
  372. if (ret) {
  373. up_write(&devices_rwsem);
  374. return ret;
  375. }
  376. strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
  377. ret = rename_compat_devs(ibdev);
  378. downgrade_write(&devices_rwsem);
  379. down_read(&ibdev->client_data_rwsem);
  380. xan_for_each_marked(&ibdev->client_data, index, client_data,
  381. CLIENT_DATA_REGISTERED) {
  382. struct ib_client *client = xa_load(&clients, index);
  383. if (!client || !client->rename)
  384. continue;
  385. client->rename(ibdev, client_data);
  386. }
  387. up_read(&ibdev->client_data_rwsem);
  388. up_read(&devices_rwsem);
  389. return 0;
  390. }
  391. int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim)
  392. {
  393. if (use_dim > 1)
  394. return -EINVAL;
  395. ibdev->use_cq_dim = use_dim;
  396. return 0;
  397. }
  398. static int alloc_name(struct ib_device *ibdev, const char *name)
  399. {
  400. struct ib_device *device;
  401. unsigned long index;
  402. struct ida inuse;
  403. int rc;
  404. int i;
  405. lockdep_assert_held_write(&devices_rwsem);
  406. ida_init(&inuse);
  407. xa_for_each (&devices, index, device) {
  408. char buf[IB_DEVICE_NAME_MAX];
  409. if (sscanf(dev_name(&device->dev), name, &i) != 1)
  410. continue;
  411. if (i < 0 || i >= INT_MAX)
  412. continue;
  413. snprintf(buf, sizeof buf, name, i);
  414. if (strcmp(buf, dev_name(&device->dev)) != 0)
  415. continue;
  416. rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
  417. if (rc < 0)
  418. goto out;
  419. }
  420. rc = ida_alloc(&inuse, GFP_KERNEL);
  421. if (rc < 0)
  422. goto out;
  423. rc = dev_set_name(&ibdev->dev, name, rc);
  424. out:
  425. ida_destroy(&inuse);
  426. return rc;
  427. }
  428. static void ib_device_release(struct device *device)
  429. {
  430. struct ib_device *dev = container_of(device, struct ib_device, dev);
  431. free_netdevs(dev);
  432. WARN_ON(refcount_read(&dev->refcount));
  433. if (dev->port_data) {
  434. ib_cache_release_one(dev);
  435. ib_security_release_port_pkey_list(dev);
  436. rdma_counter_release(dev);
  437. kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
  438. pdata[0]),
  439. rcu_head);
  440. }
  441. mutex_destroy(&dev->unregistration_lock);
  442. mutex_destroy(&dev->compat_devs_mutex);
  443. xa_destroy(&dev->compat_devs);
  444. xa_destroy(&dev->client_data);
  445. kfree_rcu(dev, rcu_head);
  446. }
  447. static int ib_device_uevent(struct device *device,
  448. struct kobj_uevent_env *env)
  449. {
  450. if (add_uevent_var(env, "NAME=%s", dev_name(device)))
  451. return -ENOMEM;
  452. /*
  453. * It would be nice to pass the node GUID with the event...
  454. */
  455. return 0;
  456. }
  457. static const void *net_namespace(struct device *d)
  458. {
  459. struct ib_core_device *coredev =
  460. container_of(d, struct ib_core_device, dev);
  461. return read_pnet(&coredev->rdma_net);
  462. }
  463. static struct class ib_class = {
  464. .name = "infiniband",
  465. .dev_release = ib_device_release,
  466. .dev_uevent = ib_device_uevent,
  467. .ns_type = &net_ns_type_operations,
  468. .namespace = net_namespace,
  469. };
  470. static void rdma_init_coredev(struct ib_core_device *coredev,
  471. struct ib_device *dev, struct net *net)
  472. {
  473. /* This BUILD_BUG_ON is intended to catch layout change
  474. * of union of ib_core_device and device.
   475. * dev must be the first element as ib_core and provider
   476. * drivers use it. Adding anything in ib_core_device before
  477. * device will break this assumption.
  478. */
  479. BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
  480. offsetof(struct ib_device, dev));
  481. coredev->dev.class = &ib_class;
  482. coredev->dev.groups = dev->groups;
  483. device_initialize(&coredev->dev);
  484. coredev->owner = dev;
  485. INIT_LIST_HEAD(&coredev->port_list);
  486. write_pnet(&coredev->rdma_net, net);
  487. }
  488. /**
  489. * _ib_alloc_device - allocate an IB device struct
  490. * @size:size of structure to allocate
  491. *
  492. * Low-level drivers should use ib_alloc_device() to allocate &struct
  493. * ib_device. @size is the size of the structure to be allocated,
  494. * including any private data used by the low-level driver.
  495. * ib_dealloc_device() must be used to free structures allocated with
  496. * ib_alloc_device().
  497. */
  498. struct ib_device *_ib_alloc_device(size_t size)
  499. {
  500. struct ib_device *device;
  501. if (WARN_ON(size < sizeof(struct ib_device)))
  502. return NULL;
  503. device = kzalloc(size, GFP_KERNEL);
  504. if (!device)
  505. return NULL;
  506. if (rdma_restrack_init(device)) {
  507. kfree(device);
  508. return NULL;
  509. }
  510. device->groups[0] = &ib_dev_attr_group;
  511. rdma_init_coredev(&device->coredev, device, &init_net);
  512. INIT_LIST_HEAD(&device->event_handler_list);
  513. spin_lock_init(&device->qp_open_list_lock);
  514. init_rwsem(&device->event_handler_rwsem);
  515. mutex_init(&device->unregistration_lock);
  516. /*
   517. * client_data needs to be an allocating xarray because we don't want
   518. * our mark to be destroyed if the user stores NULL in the client data.
  519. */
  520. xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
  521. init_rwsem(&device->client_data_rwsem);
  522. xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
  523. mutex_init(&device->compat_devs_mutex);
  524. init_completion(&device->unreg_completion);
  525. INIT_WORK(&device->unregistration_work, ib_unregister_work);
  526. return device;
  527. }
  528. EXPORT_SYMBOL(_ib_alloc_device);
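/*
 * Illustrative driver-side allocation (editorial addition), assuming the
 * ib_alloc_device() wrapper declared in <rdma/ib_verbs.h>; struct my_dev is
 * a hypothetical driver structure embedding the ib_device:
 *
 *	struct my_dev {
 *		struct ib_device ibdev;
 *		...driver private data...
 *	};
 *
 *	struct my_dev *mdev = ib_alloc_device(my_dev, ibdev);
 *	if (!mdev)
 *		return -ENOMEM;
 *	...on error paths: ib_dealloc_device(&mdev->ibdev);
 */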
  529. /**
  530. * ib_dealloc_device - free an IB device struct
  531. * @device:structure to free
  532. *
  533. * Free a structure allocated with ib_alloc_device().
  534. */
  535. void ib_dealloc_device(struct ib_device *device)
  536. {
  537. if (device->ops.dealloc_driver)
  538. device->ops.dealloc_driver(device);
  539. /*
  540. * ib_unregister_driver() requires all devices to remain in the xarray
  541. * while their ops are callable. The last op we call is dealloc_driver
  542. * above. This is needed to create a fence on op callbacks prior to
  543. * allowing the driver module to unload.
  544. */
  545. down_write(&devices_rwsem);
  546. if (xa_load(&devices, device->index) == device)
  547. xa_erase(&devices, device->index);
  548. up_write(&devices_rwsem);
  549. /* Expedite releasing netdev references */
  550. free_netdevs(device);
  551. WARN_ON(!xa_empty(&device->compat_devs));
  552. WARN_ON(!xa_empty(&device->client_data));
  553. WARN_ON(refcount_read(&device->refcount));
  554. rdma_restrack_clean(device);
  555. /* Balances with device_initialize */
  556. put_device(&device->dev);
  557. }
  558. EXPORT_SYMBOL(ib_dealloc_device);
  559. /*
  560. * add_client_context() and remove_client_context() must be safe against
  561. * parallel calls on the same device - registration/unregistration of both the
  562. * device and client can be occurring in parallel.
  563. *
   564. * The routines need to be a fence; any caller must not return until the add
  565. * or remove is fully completed.
  566. */
  567. static int add_client_context(struct ib_device *device,
  568. struct ib_client *client)
  569. {
  570. int ret = 0;
  571. if (!device->kverbs_provider && !client->no_kverbs_req)
  572. return 0;
  573. down_write(&device->client_data_rwsem);
  574. /*
  575. * So long as the client is registered hold both the client and device
  576. * unregistration locks.
  577. */
  578. if (!refcount_inc_not_zero(&client->uses))
  579. goto out_unlock;
  580. refcount_inc(&device->refcount);
  581. /*
  582. * Another caller to add_client_context got here first and has already
   583. * completely initialized the context.
  584. */
  585. if (xa_get_mark(&device->client_data, client->client_id,
  586. CLIENT_DATA_REGISTERED))
  587. goto out;
  588. ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
  589. GFP_KERNEL));
  590. if (ret)
  591. goto out;
  592. downgrade_write(&device->client_data_rwsem);
  593. if (client->add) {
  594. if (client->add(device)) {
  595. /*
  596. * If a client fails to add then the error code is
  597. * ignored, but we won't call any more ops on this
  598. * client.
  599. */
  600. xa_erase(&device->client_data, client->client_id);
  601. up_read(&device->client_data_rwsem);
  602. ib_device_put(device);
  603. ib_client_put(client);
  604. return 0;
  605. }
  606. }
  607. /* Readers shall not see a client until add has been completed */
  608. xa_set_mark(&device->client_data, client->client_id,
  609. CLIENT_DATA_REGISTERED);
  610. up_read(&device->client_data_rwsem);
  611. return 0;
  612. out:
  613. ib_device_put(device);
  614. ib_client_put(client);
  615. out_unlock:
  616. up_write(&device->client_data_rwsem);
  617. return ret;
  618. }
  619. static void remove_client_context(struct ib_device *device,
  620. unsigned int client_id)
  621. {
  622. struct ib_client *client;
  623. void *client_data;
  624. down_write(&device->client_data_rwsem);
  625. if (!xa_get_mark(&device->client_data, client_id,
  626. CLIENT_DATA_REGISTERED)) {
  627. up_write(&device->client_data_rwsem);
  628. return;
  629. }
  630. client_data = xa_load(&device->client_data, client_id);
  631. xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
  632. client = xa_load(&clients, client_id);
  633. up_write(&device->client_data_rwsem);
  634. /*
  635. * Notice we cannot be holding any exclusive locks when calling the
  636. * remove callback as the remove callback can recurse back into any
  637. * public functions in this module and thus try for any locks those
  638. * functions take.
  639. *
  640. * For this reason clients and drivers should not call the
   641. * unregistration functions while holding any locks.
  642. */
  643. if (client->remove)
  644. client->remove(device, client_data);
  645. xa_erase(&device->client_data, client_id);
  646. ib_device_put(device);
  647. ib_client_put(client);
  648. }
  649. static int alloc_port_data(struct ib_device *device)
  650. {
  651. struct ib_port_data_rcu *pdata_rcu;
  652. unsigned int port;
  653. if (device->port_data)
  654. return 0;
  655. /* This can only be called once the physical port range is defined */
  656. if (WARN_ON(!device->phys_port_cnt))
  657. return -EINVAL;
  658. /*
  659. * device->port_data is indexed directly by the port number to make
  660. * access to this data as efficient as possible.
  661. *
   662. * Therefore port_data is declared as a 1-based array with potential
  663. * empty slots at the beginning.
  664. */
  665. pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
  666. rdma_end_port(device) + 1),
  667. GFP_KERNEL);
  668. if (!pdata_rcu)
  669. return -ENOMEM;
  670. /*
  671. * The rcu_head is put in front of the port data array and the stored
  672. * pointer is adjusted since we never need to see that member until
  673. * kfree_rcu.
  674. */
  675. device->port_data = pdata_rcu->pdata;
  676. rdma_for_each_port (device, port) {
  677. struct ib_port_data *pdata = &device->port_data[port];
  678. pdata->ib_dev = device;
  679. spin_lock_init(&pdata->pkey_list_lock);
  680. INIT_LIST_HEAD(&pdata->pkey_list);
  681. spin_lock_init(&pdata->netdev_lock);
  682. INIT_HLIST_NODE(&pdata->ndev_hash_link);
  683. }
  684. return 0;
  685. }
  686. static int verify_immutable(const struct ib_device *dev, u8 port)
  687. {
  688. return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
  689. rdma_max_mad_size(dev, port) != 0);
  690. }
  691. static int setup_port_data(struct ib_device *device)
  692. {
  693. unsigned int port;
  694. int ret;
  695. ret = alloc_port_data(device);
  696. if (ret)
  697. return ret;
  698. rdma_for_each_port (device, port) {
  699. struct ib_port_data *pdata = &device->port_data[port];
  700. ret = device->ops.get_port_immutable(device, port,
  701. &pdata->immutable);
  702. if (ret)
  703. return ret;
  704. if (verify_immutable(device, port))
  705. return -EINVAL;
  706. }
  707. return 0;
  708. }
  709. void ib_get_device_fw_str(struct ib_device *dev, char *str)
  710. {
  711. if (dev->ops.get_dev_fw_str)
  712. dev->ops.get_dev_fw_str(dev, str);
  713. else
  714. str[0] = '\0';
  715. }
  716. EXPORT_SYMBOL(ib_get_device_fw_str);
  717. static void ib_policy_change_task(struct work_struct *work)
  718. {
  719. struct ib_device *dev;
  720. unsigned long index;
  721. down_read(&devices_rwsem);
  722. xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
  723. unsigned int i;
  724. rdma_for_each_port (dev, i) {
  725. u64 sp;
  726. int ret = ib_get_cached_subnet_prefix(dev,
  727. i,
  728. &sp);
  729. WARN_ONCE(ret,
  730. "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
  731. ret);
  732. if (!ret)
  733. ib_security_cache_change(dev, i, sp);
  734. }
  735. }
  736. up_read(&devices_rwsem);
  737. }
  738. static int ib_security_change(struct notifier_block *nb, unsigned long event,
  739. void *lsm_data)
  740. {
  741. if (event != LSM_POLICY_CHANGE)
  742. return NOTIFY_DONE;
  743. schedule_work(&ib_policy_change_work);
  744. ib_mad_agent_security_change();
  745. return NOTIFY_OK;
  746. }
  747. static void compatdev_release(struct device *dev)
  748. {
  749. struct ib_core_device *cdev =
  750. container_of(dev, struct ib_core_device, dev);
  751. kfree(cdev);
  752. }
  753. static int add_one_compat_dev(struct ib_device *device,
  754. struct rdma_dev_net *rnet)
  755. {
  756. struct ib_core_device *cdev;
  757. int ret;
  758. lockdep_assert_held(&rdma_nets_rwsem);
  759. if (!ib_devices_shared_netns)
  760. return 0;
  761. /*
  762. * Create and add compat device in all namespaces other than where it
  763. * is currently bound to.
  764. */
  765. if (net_eq(read_pnet(&rnet->net),
  766. read_pnet(&device->coredev.rdma_net)))
  767. return 0;
  768. /*
  769. * The first of init_net() or ib_register_device() to take the
  770. * compat_devs_mutex wins and gets to add the device. Others will wait
  771. * for completion here.
  772. */
  773. mutex_lock(&device->compat_devs_mutex);
  774. cdev = xa_load(&device->compat_devs, rnet->id);
  775. if (cdev) {
  776. ret = 0;
  777. goto done;
  778. }
  779. ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
  780. if (ret)
  781. goto done;
  782. cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
  783. if (!cdev) {
  784. ret = -ENOMEM;
  785. goto cdev_err;
  786. }
  787. cdev->dev.parent = device->dev.parent;
  788. rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
  789. cdev->dev.release = compatdev_release;
  790. ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
  791. if (ret)
  792. goto add_err;
  793. ret = device_add(&cdev->dev);
  794. if (ret)
  795. goto add_err;
  796. ret = ib_setup_port_attrs(cdev);
  797. if (ret)
  798. goto port_err;
  799. ret = xa_err(xa_store(&device->compat_devs, rnet->id,
  800. cdev, GFP_KERNEL));
  801. if (ret)
  802. goto insert_err;
  803. mutex_unlock(&device->compat_devs_mutex);
  804. return 0;
  805. insert_err:
  806. ib_free_port_attrs(cdev);
  807. port_err:
  808. device_del(&cdev->dev);
  809. add_err:
  810. put_device(&cdev->dev);
  811. cdev_err:
  812. xa_release(&device->compat_devs, rnet->id);
  813. done:
  814. mutex_unlock(&device->compat_devs_mutex);
  815. return ret;
  816. }
  817. static void remove_one_compat_dev(struct ib_device *device, u32 id)
  818. {
  819. struct ib_core_device *cdev;
  820. mutex_lock(&device->compat_devs_mutex);
  821. cdev = xa_erase(&device->compat_devs, id);
  822. mutex_unlock(&device->compat_devs_mutex);
  823. if (cdev) {
  824. ib_free_port_attrs(cdev);
  825. device_del(&cdev->dev);
  826. put_device(&cdev->dev);
  827. }
  828. }
  829. static void remove_compat_devs(struct ib_device *device)
  830. {
  831. struct ib_core_device *cdev;
  832. unsigned long index;
  833. xa_for_each (&device->compat_devs, index, cdev)
  834. remove_one_compat_dev(device, index);
  835. }
  836. static int add_compat_devs(struct ib_device *device)
  837. {
  838. struct rdma_dev_net *rnet;
  839. unsigned long index;
  840. int ret = 0;
  841. lockdep_assert_held(&devices_rwsem);
  842. down_read(&rdma_nets_rwsem);
  843. xa_for_each (&rdma_nets, index, rnet) {
  844. ret = add_one_compat_dev(device, rnet);
  845. if (ret)
  846. break;
  847. }
  848. up_read(&rdma_nets_rwsem);
  849. return ret;
  850. }
  851. static void remove_all_compat_devs(void)
  852. {
  853. struct ib_compat_device *cdev;
  854. struct ib_device *dev;
  855. unsigned long index;
  856. down_read(&devices_rwsem);
  857. xa_for_each (&devices, index, dev) {
  858. unsigned long c_index = 0;
  859. /* Hold nets_rwsem so that any other thread modifying this
  860. * system param can sync with this thread.
  861. */
  862. down_read(&rdma_nets_rwsem);
  863. xa_for_each (&dev->compat_devs, c_index, cdev)
  864. remove_one_compat_dev(dev, c_index);
  865. up_read(&rdma_nets_rwsem);
  866. }
  867. up_read(&devices_rwsem);
  868. }
  869. static int add_all_compat_devs(void)
  870. {
  871. struct rdma_dev_net *rnet;
  872. struct ib_device *dev;
  873. unsigned long index;
  874. int ret = 0;
  875. down_read(&devices_rwsem);
  876. xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
  877. unsigned long net_index = 0;
  878. /* Hold nets_rwsem so that any other thread modifying this
  879. * system param can sync with this thread.
  880. */
  881. down_read(&rdma_nets_rwsem);
  882. xa_for_each (&rdma_nets, net_index, rnet) {
  883. ret = add_one_compat_dev(dev, rnet);
  884. if (ret)
  885. break;
  886. }
  887. up_read(&rdma_nets_rwsem);
  888. }
  889. up_read(&devices_rwsem);
  890. if (ret)
  891. remove_all_compat_devs();
  892. return ret;
  893. }
  894. int rdma_compatdev_set(u8 enable)
  895. {
  896. struct rdma_dev_net *rnet;
  897. unsigned long index;
  898. int ret = 0;
  899. down_write(&rdma_nets_rwsem);
  900. if (ib_devices_shared_netns == enable) {
  901. up_write(&rdma_nets_rwsem);
  902. return 0;
  903. }
  904. /* enable/disable of compat devices is not supported
   905. * when more than the default init_net exists.
  906. */
  907. xa_for_each (&rdma_nets, index, rnet) {
  908. ret++;
  909. break;
  910. }
  911. if (!ret)
  912. ib_devices_shared_netns = enable;
  913. up_write(&rdma_nets_rwsem);
  914. if (ret)
  915. return -EBUSY;
  916. if (enable)
  917. ret = add_all_compat_devs();
  918. else
  919. remove_all_compat_devs();
  920. return ret;
  921. }
  922. static void rdma_dev_exit_net(struct net *net)
  923. {
  924. struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
  925. struct ib_device *dev;
  926. unsigned long index;
  927. int ret;
  928. down_write(&rdma_nets_rwsem);
  929. /*
  930. * Prevent the ID from being re-used and hide the id from xa_for_each.
  931. */
  932. ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
  933. WARN_ON(ret);
  934. up_write(&rdma_nets_rwsem);
  935. down_read(&devices_rwsem);
  936. xa_for_each (&devices, index, dev) {
  937. get_device(&dev->dev);
  938. /*
   939. * Release the devices_rwsem so that the potentially blocking
   940. * device_del() doesn't hold the devices_rwsem for too long.
  941. */
  942. up_read(&devices_rwsem);
  943. remove_one_compat_dev(dev, rnet->id);
  944. /*
  945. * If the real device is in the NS then move it back to init.
  946. */
  947. rdma_dev_change_netns(dev, net, &init_net);
  948. put_device(&dev->dev);
  949. down_read(&devices_rwsem);
  950. }
  951. up_read(&devices_rwsem);
  952. rdma_nl_net_exit(rnet);
  953. xa_erase(&rdma_nets, rnet->id);
  954. }
  955. static __net_init int rdma_dev_init_net(struct net *net)
  956. {
  957. struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
  958. unsigned long index;
  959. struct ib_device *dev;
  960. int ret;
  961. write_pnet(&rnet->net, net);
  962. ret = rdma_nl_net_init(rnet);
  963. if (ret)
  964. return ret;
  965. /* No need to create any compat devices in default init_net. */
  966. if (net_eq(net, &init_net))
  967. return 0;
  968. ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
  969. if (ret) {
  970. rdma_nl_net_exit(rnet);
  971. return ret;
  972. }
  973. down_read(&devices_rwsem);
  974. xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
  975. /* Hold nets_rwsem so that netlink command cannot change
  976. * system configuration for device sharing mode.
  977. */
  978. down_read(&rdma_nets_rwsem);
  979. ret = add_one_compat_dev(dev, rnet);
  980. up_read(&rdma_nets_rwsem);
  981. if (ret)
  982. break;
  983. }
  984. up_read(&devices_rwsem);
  985. if (ret)
  986. rdma_dev_exit_net(net);
  987. return ret;
  988. }
  989. /*
  990. * Assign the unique string device name and the unique device index. This is
  991. * undone by ib_dealloc_device.
  992. */
  993. static int assign_name(struct ib_device *device, const char *name)
  994. {
  995. static u32 last_id;
  996. int ret;
  997. down_write(&devices_rwsem);
  998. /* Assign a unique name to the device */
  999. if (strchr(name, '%'))
  1000. ret = alloc_name(device, name);
  1001. else
  1002. ret = dev_set_name(&device->dev, name);
  1003. if (ret)
  1004. goto out;
  1005. if (__ib_device_get_by_name(dev_name(&device->dev))) {
  1006. ret = -ENFILE;
  1007. goto out;
  1008. }
  1009. strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
  1010. ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
  1011. &last_id, GFP_KERNEL);
  1012. if (ret > 0)
  1013. ret = 0;
  1014. out:
  1015. up_write(&devices_rwsem);
  1016. return ret;
  1017. }
  1018. /*
  1019. * setup_device() allocates memory and sets up data that requires calling the
  1020. * device ops, this is the only reason these actions are not done during
  1021. * ib_alloc_device. It is undone by ib_dealloc_device().
  1022. */
  1023. static int setup_device(struct ib_device *device)
  1024. {
  1025. struct ib_udata uhw = {.outlen = 0, .inlen = 0};
  1026. int ret;
  1027. ib_device_check_mandatory(device);
  1028. ret = setup_port_data(device);
  1029. if (ret) {
  1030. dev_warn(&device->dev, "Couldn't create per-port data\n");
  1031. return ret;
  1032. }
  1033. memset(&device->attrs, 0, sizeof(device->attrs));
  1034. ret = device->ops.query_device(device, &device->attrs, &uhw);
  1035. if (ret) {
  1036. dev_warn(&device->dev,
  1037. "Couldn't query the device attributes\n");
  1038. return ret;
  1039. }
  1040. return 0;
  1041. }
  1042. static void disable_device(struct ib_device *device)
  1043. {
  1044. u32 cid;
  1045. WARN_ON(!refcount_read(&device->refcount));
  1046. down_write(&devices_rwsem);
  1047. xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
  1048. up_write(&devices_rwsem);
  1049. /*
  1050. * Remove clients in LIFO order, see assign_client_id. This could be
  1051. * more efficient if xarray learns to reverse iterate. Since no new
  1052. * clients can be added to this ib_device past this point we only need
  1053. * the maximum possible client_id value here.
  1054. */
  1055. down_read(&clients_rwsem);
  1056. cid = highest_client_id;
  1057. up_read(&clients_rwsem);
  1058. while (cid) {
  1059. cid--;
  1060. remove_client_context(device, cid);
  1061. }
  1062. ib_cq_pool_destroy(device);
  1063. /* Pairs with refcount_set in enable_device */
  1064. ib_device_put(device);
  1065. wait_for_completion(&device->unreg_completion);
  1066. /*
  1067. * compat devices must be removed after device refcount drops to zero.
  1068. * Otherwise init_net() may add more compatdevs after removing compat
  1069. * devices and before device is disabled.
  1070. */
  1071. remove_compat_devs(device);
  1072. }
  1073. /*
  1074. * An enabled device is visible to all clients and to all the public facing
  1075. * APIs that return a device pointer. This always returns with a new get, even
  1076. * if it fails.
  1077. */
  1078. static int enable_device_and_get(struct ib_device *device)
  1079. {
  1080. struct ib_client *client;
  1081. unsigned long index;
  1082. int ret = 0;
  1083. /*
  1084. * One ref belongs to the xa and the other belongs to this
  1085. * thread. This is needed to guard against parallel unregistration.
  1086. */
  1087. refcount_set(&device->refcount, 2);
  1088. down_write(&devices_rwsem);
  1089. xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
  1090. /*
  1091. * By using downgrade_write() we ensure that no other thread can clear
  1092. * DEVICE_REGISTERED while we are completing the client setup.
  1093. */
  1094. downgrade_write(&devices_rwsem);
  1095. if (device->ops.enable_driver) {
  1096. ret = device->ops.enable_driver(device);
  1097. if (ret)
  1098. goto out;
  1099. }
  1100. ib_cq_pool_init(device);
  1101. down_read(&clients_rwsem);
  1102. xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
  1103. ret = add_client_context(device, client);
  1104. if (ret)
  1105. break;
  1106. }
  1107. up_read(&clients_rwsem);
  1108. if (!ret)
  1109. ret = add_compat_devs(device);
  1110. out:
  1111. up_read(&devices_rwsem);
  1112. return ret;
  1113. }
  1114. static void prevent_dealloc_device(struct ib_device *ib_dev)
  1115. {
  1116. }
  1117. /**
  1118. * ib_register_device - Register an IB device with IB core
  1119. * @device: Device to register
  1120. * @name: unique string device name. This may include a '%' which will
  1121. * cause a unique index to be added to the passed device name.
  1122. * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
  1123. * device will be used. In this case the caller should fully
  1124. * setup the ibdev for DMA. This usually means using dma_virt_ops.
  1125. *
  1126. * Low-level drivers use ib_register_device() to register their
  1127. * devices with the IB core. All registered clients will receive a
  1128. * callback for each device that is added. @device must be allocated
  1129. * with ib_alloc_device().
  1130. *
  1131. * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
  1132. * asynchronously then the device pointer may become freed as soon as this
  1133. * function returns.
  1134. */
  1135. int ib_register_device(struct ib_device *device, const char *name,
  1136. struct device *dma_device)
  1137. {
  1138. int ret;
  1139. ret = assign_name(device, name);
  1140. if (ret)
  1141. return ret;
  1142. /*
  1143. * If the caller does not provide a DMA capable device then the IB core
  1144. * will set up ib_sge and scatterlist structures that stash the kernel
  1145. * virtual address into the address field.
  1146. */
  1147. WARN_ON(dma_device && !dma_device->dma_parms);
  1148. device->dma_device = dma_device;
  1149. ret = setup_device(device);
  1150. if (ret)
  1151. return ret;
  1152. ret = ib_cache_setup_one(device);
  1153. if (ret) {
  1154. dev_warn(&device->dev,
  1155. "Couldn't set up InfiniBand P_Key/GID cache\n");
  1156. return ret;
  1157. }
  1158. ib_device_register_rdmacg(device);
  1159. rdma_counter_init(device);
  1160. /*
  1161. * Ensure that ADD uevent is not fired because it
   1162. * is too early and the device is not initialized yet.
  1163. */
  1164. dev_set_uevent_suppress(&device->dev, true);
  1165. ret = device_add(&device->dev);
  1166. if (ret)
  1167. goto cg_cleanup;
  1168. ret = ib_device_register_sysfs(device);
  1169. if (ret) {
  1170. dev_warn(&device->dev,
  1171. "Couldn't register device with driver model\n");
  1172. goto dev_cleanup;
  1173. }
  1174. ret = enable_device_and_get(device);
  1175. if (ret) {
  1176. void (*dealloc_fn)(struct ib_device *);
  1177. /*
  1178. * If we hit this error flow then we don't want to
  1179. * automatically dealloc the device since the caller is
  1180. * expected to call ib_dealloc_device() after
  1181. * ib_register_device() fails. This is tricky due to the
  1182. * possibility for a parallel unregistration along with this
  1183. * error flow. Since we have a refcount here we know any
  1184. * parallel flow is stopped in disable_device and will see the
  1185. * special dealloc_driver pointer, causing the responsibility to
  1186. * ib_dealloc_device() to revert back to this thread.
  1187. */
  1188. dealloc_fn = device->ops.dealloc_driver;
  1189. device->ops.dealloc_driver = prevent_dealloc_device;
  1190. ib_device_put(device);
  1191. __ib_unregister_device(device);
  1192. device->ops.dealloc_driver = dealloc_fn;
  1193. dev_set_uevent_suppress(&device->dev, false);
  1194. return ret;
  1195. }
  1196. dev_set_uevent_suppress(&device->dev, false);
  1197. /* Mark for userspace that device is ready */
  1198. kobject_uevent(&device->dev.kobj, KOBJ_ADD);
  1199. ib_device_put(device);
  1200. return 0;
  1201. dev_cleanup:
  1202. device_del(&device->dev);
  1203. cg_cleanup:
  1204. dev_set_uevent_suppress(&device->dev, false);
  1205. ib_device_unregister_rdmacg(device);
  1206. ib_cache_cleanup_one(device);
  1207. return ret;
  1208. }
  1209. EXPORT_SYMBOL(ib_register_device);
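/*
 * Illustrative probe-time flow (editorial addition); names other than the
 * ib_* calls are hypothetical and error handling is trimmed:
 *
 *	mdev = ib_alloc_device(my_dev, ibdev);
 *	mdev->ibdev.phys_port_cnt = 1;
 *	ib_set_device_ops(&mdev->ibdev, &my_dev_ops);
 *	ret = ib_register_device(&mdev->ibdev, "mydev%d", &pdev->dev);
 *	if (ret)
 *		ib_dealloc_device(&mdev->ibdev);
 */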
  1210. /* Callers must hold a get on the device. */
  1211. static void __ib_unregister_device(struct ib_device *ib_dev)
  1212. {
  1213. /*
  1214. * We have a registration lock so that all the calls to unregister are
   1215. * fully fenced; once any unregister returns, the device is truly
  1216. * unregistered even if multiple callers are unregistering it at the
  1217. * same time. This also interacts with the registration flow and
  1218. * provides sane semantics if register and unregister are racing.
  1219. */
  1220. mutex_lock(&ib_dev->unregistration_lock);
  1221. if (!refcount_read(&ib_dev->refcount))
  1222. goto out;
  1223. disable_device(ib_dev);
  1224. /* Expedite removing unregistered pointers from the hash table */
  1225. free_netdevs(ib_dev);
  1226. ib_device_unregister_sysfs(ib_dev);
  1227. device_del(&ib_dev->dev);
  1228. ib_device_unregister_rdmacg(ib_dev);
  1229. ib_cache_cleanup_one(ib_dev);
  1230. /*
  1231. * Drivers using the new flow may not call ib_dealloc_device except
  1232. * in error unwind prior to registration success.
  1233. */
  1234. if (ib_dev->ops.dealloc_driver &&
  1235. ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
  1236. WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
  1237. ib_dealloc_device(ib_dev);
  1238. }
  1239. out:
  1240. mutex_unlock(&ib_dev->unregistration_lock);
  1241. }
  1242. /**
  1243. * ib_unregister_device - Unregister an IB device
  1244. * @ib_dev: The device to unregister
  1245. *
  1246. * Unregister an IB device. All clients will receive a remove callback.
  1247. *
  1248. * Callers should call this routine only once, and protect against races with
  1249. * registration. Typically it should only be called as part of a remove
  1250. * callback in an implementation of driver core's struct device_driver and
  1251. * related.
  1252. *
  1253. * If ops.dealloc_driver is used then ib_dev will be freed upon return from
  1254. * this function.
  1255. */
  1256. void ib_unregister_device(struct ib_device *ib_dev)
  1257. {
  1258. get_device(&ib_dev->dev);
  1259. __ib_unregister_device(ib_dev);
  1260. put_device(&ib_dev->dev);
  1261. }
  1262. EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
 * @ib_dev: The device to unregister
 *
 * This is the same as ib_unregister_device(), except it includes an internal
 * ib_device_put() that should match a 'get' obtained by the caller.
 *
 * It is safe to call this routine concurrently from multiple threads while
 * holding the 'get'. When the function returns the device is fully
 * unregistered.
 *
 * Drivers using this flow MUST provide the dealloc_driver callback to clean
 * up their resources associated with the device and dealloc it.
 */
void ib_unregister_device_and_put(struct ib_device *ib_dev)
{
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	ib_device_put(ib_dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_and_put);

/**
 * ib_unregister_driver - Unregister all IB devices for a driver
 * @driver_id: The driver to unregister
 *
 * This implements a fence for device unregistration. It only returns once all
 * devices associated with the driver_id have fully completed their
 * unregistration and returned from ib_unregister_device*().
 *
 * If devices are not yet unregistered it goes ahead and starts unregistering
 * them.
 *
 * This does not block creation of new devices with the given driver_id, that
 * is the responsibility of the caller.
 */
void ib_unregister_driver(enum rdma_driver_id driver_id)
{
	struct ib_device *ib_dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, ib_dev) {
		if (ib_dev->ops.driver_id != driver_id)
			continue;

		get_device(&ib_dev->dev);
		up_read(&devices_rwsem);

		WARN_ON(!ib_dev->ops.dealloc_driver);
		__ib_unregister_device(ib_dev);

		put_device(&ib_dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);
}
EXPORT_SYMBOL(ib_unregister_driver);
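
/*
 * Example (illustrative only): a sketch of a module exit path that uses
 * ib_unregister_driver() as a fence before the module text can go away. The
 * driver id value RDMA_DRIVER_SIW and my_driver_exit() are assumptions made
 * for the sake of the example.
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		// Starts and waits for unregistration of every ib_device
 *		// still registered under this driver_id.
 *		ib_unregister_driver(RDMA_DRIVER_SIW);
 *	}
 *	module_exit(my_driver_exit);
 */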

static void ib_unregister_work(struct work_struct *work)
{
	struct ib_device *ib_dev =
		container_of(work, struct ib_device, unregistration_work);

	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}

/**
 * ib_unregister_device_queued - Unregister a device using a work queue
 * @ib_dev: The device to unregister
 *
 * This schedules an asynchronous unregistration using a WQ for the device. A
 * driver should use this to avoid holding locks while doing unregistration,
 * such as holding the RTNL lock.
 *
 * Drivers using this API must use ib_unregister_driver before module unload
 * to ensure that all scheduled unregistrations have completed.
 */
void ib_unregister_device_queued(struct ib_device *ib_dev)
{
	WARN_ON(!refcount_read(&ib_dev->refcount));
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
		put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_queued);
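
/*
 * Example (illustrative only): queued unregistration from a context that
 * must not block, here a hypothetical netdev notifier running under RTNL.
 * my_notifier(), my_dev_from_notifier() and struct my_dev are assumptions
 * made for the sake of the example.
 *
 *	static int my_notifier(struct notifier_block *nb, unsigned long event,
 *			       void *ptr)
 *	{
 *		struct my_dev *mdev = my_dev_from_notifier(nb);
 *
 *		if (event == NETDEV_UNREGISTER)
 *			// Defers the blocking unregistration to a workqueue;
 *			// the module exit path must still call
 *			// ib_unregister_driver() to wait for it.
 *			ib_unregister_device_queued(&mdev->ibdev);
 *		return NOTIFY_DONE;
 *	}
 */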

/*
 * The caller must pass in a device that has the kref held and the refcount
 * released. If the device is in cur_net and still registered then it is moved
 * into net.
 */
static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
				 struct net *net)
{
	int ret2 = -EINVAL;
	int ret;

	mutex_lock(&device->unregistration_lock);

	/*
	 * If a device is not under ib_device_get() or if the
	 * unregistration_lock is not held, the namespace can be changed, or
	 * the device can be unregistered. Check again under the lock.
	 */
	if (refcount_read(&device->refcount) == 0 ||
	    !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
		ret = -ENODEV;
		goto out;
	}

	kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
	disable_device(device);

	/*
	 * At this point no one can be using the device, so it is safe to
	 * change the namespace.
	 */
	write_pnet(&device->coredev.rdma_net, net);

	down_read(&devices_rwsem);
	/*
	 * Currently rdma devices are system wide unique. So the device name
	 * is guaranteed free in the new namespace. Publish the new namespace
	 * at the sysfs level.
	 */
	ret = device_rename(&device->dev, dev_name(&device->dev));
	up_read(&devices_rwsem);
	if (ret) {
		dev_warn(&device->dev,
			 "%s: Couldn't rename device after namespace change\n",
			 __func__);
		/* Try and put things back and re-enable the device */
		write_pnet(&device->coredev.rdma_net, cur_net);
	}

	ret2 = enable_device_and_get(device);
	if (ret2) {
		/*
		 * This shouldn't really happen, but if it does, let the user
		 * retry at later point. So don't disable the device.
		 */
		dev_warn(&device->dev,
			 "%s: Couldn't re-enable device after namespace change\n",
			 __func__);
	}
	kobject_uevent(&device->dev.kobj, KOBJ_ADD);

	ib_device_put(device);
out:
	mutex_unlock(&device->unregistration_lock);
	if (ret)
		return ret;
	return ret2;
}

int ib_device_set_netns_put(struct sk_buff *skb,
			    struct ib_device *dev, u32 ns_fd)
{
	struct net *net;
	int ret;

	net = get_net_ns_by_fd(ns_fd);
	if (IS_ERR(net)) {
		ret = PTR_ERR(net);
		goto net_err;
	}

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
		ret = -EPERM;
		goto ns_err;
	}

	/*
	 * Currently supported only for those providers which support
	 * disassociation and don't do port specific sysfs init. Once a
	 * port_cleanup infrastructure is implemented, this limitation will be
	 * removed.
	 */
	if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
	    ib_devices_shared_netns) {
		ret = -EOPNOTSUPP;
		goto ns_err;
	}

	get_device(&dev->dev);
	ib_device_put(dev);
	ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
	put_device(&dev->dev);

	put_net(net);
	return ret;

ns_err:
	put_net(net);
net_err:
	ib_device_put(dev);
	return ret;
}

static struct pernet_operations rdma_dev_net_ops = {
	.init = rdma_dev_init_net,
	.exit = rdma_dev_exit_net,
	.id = &rdma_dev_net_id,
	.size = sizeof(struct rdma_dev_net),
};

static int assign_client_id(struct ib_client *client)
{
	int ret;

	down_write(&clients_rwsem);
	/*
	 * The add/remove callbacks must be called in FIFO/LIFO order. To
	 * achieve this we assign client_ids so they are sorted in
	 * registration order.
	 */
	client->client_id = highest_client_id;
	ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
	if (ret)
		goto out;

	highest_client_id++;
	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);

out:
	up_write(&clients_rwsem);
	return ret;
}

static void remove_client_id(struct ib_client *client)
{
	down_write(&clients_rwsem);
	xa_erase(&clients, client->client_id);
	for (; highest_client_id; highest_client_id--)
		if (xa_load(&clients, highest_client_id - 1))
			break;
	up_write(&clients_rwsem);
}

/**
 * ib_register_client - Register an IB client
 * @client: Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;
	int ret;

	refcount_set(&client->uses, 1);
	init_completion(&client->uses_zero);
	ret = assign_client_id(client);
	if (ret)
		return ret;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret) {
			up_read(&devices_rwsem);
			ib_unregister_client(client);
			return ret;
		}
	}
	up_read(&devices_rwsem);
	return 0;
}
EXPORT_SYMBOL(ib_register_client);
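
/*
 * Example (illustrative only): a minimal sketch of an ib_client, assuming
 * the usual add/remove callback layout of struct ib_client in this kernel
 * (an add callback returning int, a remove callback receiving the stored
 * client data). my_add(), my_remove(), struct my_ctx and my_client are not
 * part of this file.
 *
 *	static int my_add(struct ib_device *ibdev)
 *	{
 *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &my_client, ctx);
 *		return 0;
 *	}
 *
 *	static void my_remove(struct ib_device *ibdev, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	// ib_register_client(&my_client) at module init;
 *	// ib_unregister_client(&my_client) at module exit.
 */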

/**
 * ib_unregister_client - Unregister an IB client
 * @client: Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 *
 * This is a full fence: once it returns, no client callbacks will be called
 * or still be running in another thread.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;

	down_write(&clients_rwsem);
	ib_client_put(client);
	xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
	up_write(&clients_rwsem);

	/* We do not want to have locks while calling client->remove() */
	rcu_read_lock();
	xa_for_each (&devices, index, device) {
		if (!ib_device_try_get(device))
			continue;
		rcu_read_unlock();

		remove_client_context(device, client->client_id);

		ib_device_put(device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	/*
	 * remove_client_context() is not a fence, it can return even though a
	 * removal is ongoing. Wait until all removals are completed.
	 */
	wait_for_completion(&client->uses_zero);
	remove_client_id(client);
}
EXPORT_SYMBOL(ib_unregister_client);

static int __ib_get_global_client_nl_info(const char *client_name,
					  struct ib_client_nl_info *res)
{
	struct ib_client *client;
	unsigned long index;
	int ret = -ENOENT;

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		if (strcmp(client->name, client_name) != 0)
			continue;
		if (!client->get_global_nl_info) {
			ret = -EOPNOTSUPP;
			break;
		}
		ret = client->get_global_nl_info(res);
		if (WARN_ON(ret == -ENOENT))
			ret = -EINVAL;
		if (!ret && res->cdev)
			get_device(res->cdev);
		break;
	}
	up_read(&clients_rwsem);
	return ret;
}

static int __ib_get_client_nl_info(struct ib_device *ibdev,
				   const char *client_name,
				   struct ib_client_nl_info *res)
{
	unsigned long index;
	void *client_data;
	int ret = -ENOENT;

	down_read(&ibdev->client_data_rwsem);
	xan_for_each_marked (&ibdev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || strcmp(client->name, client_name) != 0)
			continue;
		if (!client->get_nl_info) {
			ret = -EOPNOTSUPP;
			break;
		}
		ret = client->get_nl_info(ibdev, client_data, res);
		if (WARN_ON(ret == -ENOENT))
			ret = -EINVAL;

		/*
		 * The cdev is guaranteed valid as long as we are inside the
		 * client_data_rwsem as remove_one can't be called. Keep it
		 * valid for the caller.
		 */
		if (!ret && res->cdev)
			get_device(res->cdev);
		break;
	}
	up_read(&ibdev->client_data_rwsem);

	return ret;
}

/**
 * ib_get_client_nl_info - Fetch the nl_info from a client
 * @ibdev: IB device to query, or NULL for a global (device-less) query
 * @client_name: Name of the client
 * @res: Result of the query
 */
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
			  struct ib_client_nl_info *res)
{
	int ret;

	if (ibdev)
		ret = __ib_get_client_nl_info(ibdev, client_name, res);
	else
		ret = __ib_get_global_client_nl_info(client_name, res);
#ifdef CONFIG_MODULES
	if (ret == -ENOENT) {
		request_module("rdma-client-%s", client_name);
		if (ibdev)
			ret = __ib_get_client_nl_info(ibdev, client_name, res);
		else
			ret = __ib_get_global_client_nl_info(client_name, res);
	}
#endif
	if (ret) {
		if (ret == -ENOENT)
			return -EOPNOTSUPP;
		return ret;
	}

	if (WARN_ON(!res->cdev))
		return -EINVAL;
	return 0;
}

/**
 * ib_set_client_data - Set IB client context
 * @device: Device to set context for
 * @client: Client to set context for
 * @data: Context to set
 *
 * ib_set_client_data() sets client context data that can be retrieved with
 * ib_get_client_data(). This can only be called while the client is
 * registered to the device; once the ib_client remove() callback returns this
 * cannot be called.
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	void *rc;

	if (WARN_ON(IS_ERR(data)))
		data = NULL;

	rc = xa_store(&device->client_data, client->client_id, data,
		      GFP_KERNEL);
	WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);
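
/*
 * Example (illustrative only): retrieving the per-device context stored by
 * the client's add callback. ib_get_client_data() is assumed to be the
 * usual counterpart declared in <rdma/ib_verbs.h>; my_client and struct
 * my_ctx are hypothetical.
 *
 *	static struct my_ctx *my_ctx_from_device(struct ib_device *ibdev)
 *	{
 *		// Returns whatever the add callback stored with
 *		// ib_set_client_data(), or NULL if nothing was stored.
 *		return ib_get_client_data(ibdev, &my_client);
 *	}
 */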

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler: Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback occurs in workqueue context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	down_write(&event_handler->device->event_handler_rwsem);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler: Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	down_write(&event_handler->device->event_handler_rwsem);
	list_del(&event_handler->list);
	up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_unregister_event_handler);
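
/*
 * Example (illustrative only): a sketch of registering an async event
 * handler, assuming the INIT_IB_EVENT_HANDLER() helper from
 * <rdma/ib_verbs.h>. my_event_handler() and my_handler are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("%s: port %u active\n",
 *				dev_name(&event->device->dev),
 *				event->element.port_num);
 *	}
 *
 *	// At init, for a given struct ib_device *ibdev:
 *	//	INIT_IB_EVENT_HANDLER(&my_handler, ibdev, my_event_handler);
 *	//	ib_register_event_handler(&my_handler);
 *	// And ib_unregister_event_handler(&my_handler) before the device or
 *	// client goes away.
 */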

void ib_dispatch_event_clients(struct ib_event *event)
{
	struct ib_event_handler *handler;

	down_read(&event->device->event_handler_rwsem);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	up_read(&event->device->event_handler_rwsem);
}

static int iw_query_port(struct ib_device *device,
			 u8 port_num,
			 struct ib_port_attr *port_attr)
{
	struct in_device *inetdev;
	struct net_device *netdev;

	memset(port_attr, 0, sizeof(*port_attr));

	netdev = ib_device_get_netdev(device, port_num);
	if (!netdev)
		return -ENODEV;

	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

	if (!netif_carrier_ok(netdev)) {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	} else {
		rcu_read_lock();
		inetdev = __in_dev_get_rcu(netdev);

		if (inetdev && inetdev->ifa_list) {
			port_attr->state = IB_PORT_ACTIVE;
			port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
		} else {
			port_attr->state = IB_PORT_INIT;
			port_attr->phys_state =
				IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
		}

		rcu_read_unlock();
	}

	dev_put(netdev);
	return device->ops.query_port(device, port_num, port_attr);
}

static int __ib_query_port(struct ib_device *device,
			   u8 port_num,
			   struct ib_port_attr *port_attr)
{
	union ib_gid gid = {};
	int err;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->ops.query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) !=
	    IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = device->ops.query_gid(device, port_num, 0, &gid);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}

/**
 * ib_query_port - Query IB port attributes
 * @device: Device to query
 * @port_num: Port number to query
 * @port_attr: Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (rdma_protocol_iwarp(device, port_num))
		return iw_query_port(device, port_num, port_attr);
	else
		return __ib_query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
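
/*
 * Example (illustrative only): querying every port of a device, for instance
 * from a client add callback. Only names visible in this file plus the
 * state/active_mtu fields of struct ib_port_attr are used; failures are
 * simply skipped.
 *
 *	static void my_dump_ports(struct ib_device *ibdev)
 *	{
 *		struct ib_port_attr attr;
 *		unsigned int port;
 *
 *		rdma_for_each_port (ibdev, port) {
 *			if (ib_query_port(ibdev, port, &attr))
 *				continue;
 *			pr_info("%s port %u: state %d, active_mtu %d\n",
 *				dev_name(&ibdev->dev), port, attr.state,
 *				attr.active_mtu);
 *		}
 *	}
 */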

static void add_ndev_hash(struct ib_port_data *pdata)
{
	unsigned long flags;

	might_sleep();

	spin_lock_irqsave(&ndev_hash_lock, flags);
	if (hash_hashed(&pdata->ndev_hash_link)) {
		hash_del_rcu(&pdata->ndev_hash_link);
		spin_unlock_irqrestore(&ndev_hash_lock, flags);
		/*
		 * We cannot do hash_add_rcu after a hash_del_rcu until the
		 * grace period
		 */
		synchronize_rcu();
		spin_lock_irqsave(&ndev_hash_lock, flags);
	}
	if (pdata->netdev)
		hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
			     (uintptr_t)pdata->netdev);
	spin_unlock_irqrestore(&ndev_hash_lock, flags);
}

/**
 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
 * @ib_dev: Device to modify
 * @ndev: net_device to affiliate, may be NULL
 * @port: IB port the net_device is connected to
 *
 * Drivers should use this to link the ib_device to a netdev so the netdev
 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
 * affiliated with any port.
 *
 * The caller must ensure that the given ndev is not unregistered or
 * unregistering, and that either the ib_device is unregistered or
 * ib_device_set_netdev() is called with NULL when the ndev sends a
 * NETDEV_UNREGISTER event.
 */
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
			 unsigned int port)
{
	struct net_device *old_ndev;
	struct ib_port_data *pdata;
	unsigned long flags;
	int ret;

	/*
	 * Drivers wish to call this before ib_register_device, so we have to
	 * set up the port data early.
	 */
	ret = alloc_port_data(ib_dev);
	if (ret)
		return ret;

	if (!rdma_is_port_valid(ib_dev, port))
		return -EINVAL;

	pdata = &ib_dev->port_data[port];
	spin_lock_irqsave(&pdata->netdev_lock, flags);
	old_ndev = rcu_dereference_protected(
		pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
	if (old_ndev == ndev) {
		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
		return 0;
	}

	if (ndev)
		dev_hold(ndev);
	rcu_assign_pointer(pdata->netdev, ndev);
	spin_unlock_irqrestore(&pdata->netdev_lock, flags);

	add_ndev_hash(pdata);
	if (old_ndev)
		dev_put(old_ndev);

	return 0;
}
EXPORT_SYMBOL(ib_device_set_netdev);
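
/*
 * Example (illustrative only): a RoCE-style driver tying its ib_device to
 * the Ethernet netdev it sits on top of, and detaching again on
 * NETDEV_UNREGISTER as the kernel-doc above requires. The single-port
 * layout and the mdev->netdev field are assumptions.
 *
 *	// During probe, before or after registration:
 *	//	ret = ib_device_set_netdev(&mdev->ibdev, mdev->netdev, 1);
 *
 *	// From the driver's netdevice notifier:
 *	//	case NETDEV_UNREGISTER:
 *	//		ib_device_set_netdev(&mdev->ibdev, NULL, 1);
 */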

static void free_netdevs(struct ib_device *ib_dev)
{
	unsigned long flags;
	unsigned int port;

	if (!ib_dev->port_data)
		return;

	rdma_for_each_port (ib_dev, port) {
		struct ib_port_data *pdata = &ib_dev->port_data[port];
		struct net_device *ndev;

		spin_lock_irqsave(&pdata->netdev_lock, flags);
		ndev = rcu_dereference_protected(
			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
		if (ndev) {
			spin_lock(&ndev_hash_lock);
			hash_del_rcu(&pdata->ndev_hash_link);
			spin_unlock(&ndev_hash_lock);

			/*
			 * If this is the last dev_put there is still a
			 * synchronize_rcu before the netdev is kfreed, so we
			 * can continue to rely on unlocked pointer
			 * comparisons after the put
			 */
			rcu_assign_pointer(pdata->netdev, NULL);
			dev_put(ndev);
		}
		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
	}
}

struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
					unsigned int port)
{
	struct ib_port_data *pdata;
	struct net_device *res;

	if (!rdma_is_port_valid(ib_dev, port))
		return NULL;

	pdata = &ib_dev->port_data[port];

	/*
	 * New drivers should use ib_device_set_netdev() not the legacy
	 * get_netdev().
	 */
	if (ib_dev->ops.get_netdev)
		res = ib_dev->ops.get_netdev(ib_dev, port);
	else {
		spin_lock(&pdata->netdev_lock);
		res = rcu_dereference_protected(
			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
		if (res)
			dev_hold(res);
		spin_unlock(&pdata->netdev_lock);
	}

	/*
	 * If we are starting to unregister expedite things by preventing
	 * propagation of an unregistering netdev.
	 */
	if (res && res->reg_state != NETREG_REGISTERED) {
		dev_put(res);
		return NULL;
	}

	return res;
}

/**
 * ib_device_get_by_netdev - Find an IB device associated with a netdev
 * @ndev: netdev to locate
 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
 *
 * Find and hold an ib_device that is associated with a netdev via
 * ib_device_set_netdev(). The caller must call ib_device_put() on the
 * returned pointer.
 */
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
					  enum rdma_driver_id driver_id)
{
	struct ib_device *res = NULL;
	struct ib_port_data *cur;

	rcu_read_lock();
	hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
				    (uintptr_t)ndev) {
		if (rcu_access_pointer(cur->netdev) == ndev &&
		    (driver_id == RDMA_DRIVER_UNKNOWN ||
		     cur->ib_dev->ops.driver_id == driver_id) &&
		    ib_device_try_get(cur->ib_dev)) {
			res = cur->ib_dev;
			break;
		}
	}
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(ib_device_get_by_netdev);
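
/*
 * Example (illustrative only): resolving the ib_device behind a netdev and
 * dropping the reference when done, as the kernel-doc above requires.
 * my_netdev_has_rdma() is a hypothetical helper.
 *
 *	static bool my_netdev_has_rdma(struct net_device *ndev)
 *	{
 *		struct ib_device *ibdev;
 *
 *		ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
 *		if (!ibdev)
 *			return false;
 *		// ... use ibdev here ...
 *		ib_device_put(ibdev);
 *		return true;
 *	}
 */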

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev which are related to a
 * netdevice, and calls the callback on each port for which the filter()
 * function returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	unsigned int port;

	rdma_for_each_port (ib_dev, port)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev =
				ib_device_get_netdev(ib_dev, port);

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices which are related to
 * netdevices, and calls the callback on each port for which the filter()
 * function returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&devices_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @nldev_cb: Callback to call for each found ib_device
 * @skb: netlink message being built
 * @cb: netlink dump callback context
 *
 * Enumerates all ib_devices visible in the socket's network namespace and
 * calls nldev_cb() on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	unsigned long index;
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
			continue;
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}
	up_read(&devices_rwsem);
	return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: P_Key table index to query
 * @pkey: Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (!device->ops.query_pkey)
		return -EOPNOTSUPP;

	return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device: Device to modify
 * @device_modify_mask: Mask of attributes to change
 * @device_modify: New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->ops.modify_device)
		return -EOPNOTSUPP;

	return device->ops.modify_device(device, device_modify_mask,
					 device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->ops.modify_port)
		rc = device->ops.modify_port(device, port_num,
					     port_modify_mask,
					     port_modify);
	else if (rdma_protocol_roce(device, port_num) &&
		 ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
		  (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
		rc = 0;
	else
		rc = -EOPNOTSUPP;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);
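
/*
 * Example (illustrative only): advertising CM support on a port, the one
 * capability bit the RoCE fallback path above accepts when the driver has
 * no modify_port op. my_set_cm_sup() is a hypothetical helper.
 *
 *	static int my_set_cm_sup(struct ib_device *ibdev, u8 port_num)
 *	{
 *		struct ib_port_modify pm = {
 *			.set_port_cap_mask = IB_PORT_CM_SUP,
 *		};
 *
 *		return ib_modify_port(ibdev, port_num, 0, &pm);
 *	}
 */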

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	unsigned int port;
	int ret, i;

	rdma_for_each_port (device, port) {
		if (!rdma_protocol_ib(device, port))
			continue;

		for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
		     ++i) {
			ret = rdma_query_gid(device, port, i, &tmp_gid);
			if (ret)
				continue;

			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
	     ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* No full member; if a limited member exists, take that. */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
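
/*
 * Example (illustrative only): locating the table index of the default
 * P_Key (0xffff, or its limited-member form 0x7fff, since only the low 15
 * bits are compared above). my_default_pkey_index() is a hypothetical
 * helper.
 *
 *	static int my_default_pkey_index(struct ib_device *ibdev, u8 port_num,
 *					 u16 *index)
 *	{
 *		// Prefers a full-member match, falls back to limited member.
 *		return ib_find_pkey(ibdev, port_num, 0xffff, index);
 *	}
 */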

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 *   for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	unsigned long index;
	void *client_data;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	/*
	 * Holding the read side guarantees that the client will not become
	 * unregistered while we are calling get_net_dev_by_params()
	 */
	down_read(&dev->client_data_rwsem);
	xan_for_each_marked (&dev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->get_net_dev_by_params)
			continue;

		net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
							addr, client_data);
		if (net_dev)
			break;
	}
	up_read(&dev->client_data_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
	struct ib_device_ops *dev_ops = &dev->ops;

#define SET_DEVICE_OP(ptr, name)                                               \
	do {                                                                   \
		if (ops->name)                                                 \
			if (!((ptr)->name))                                    \
				(ptr)->name = ops->name;                       \
	} while (0)

#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)

	if (ops->driver_id != RDMA_DRIVER_UNKNOWN) {
		WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN &&
			dev_ops->driver_id != ops->driver_id);
		dev_ops->driver_id = ops->driver_id;
	}
	if (ops->owner) {
		WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner);
		dev_ops->owner = ops->owner;
	}
	if (ops->uverbs_abi_ver)
		dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver;

	dev_ops->uverbs_no_driver_id_binding |=
		ops->uverbs_no_driver_id_binding;

	SET_DEVICE_OP(dev_ops, add_gid);
	SET_DEVICE_OP(dev_ops, advise_mr);
	SET_DEVICE_OP(dev_ops, alloc_dm);
	SET_DEVICE_OP(dev_ops, alloc_hw_stats);
	SET_DEVICE_OP(dev_ops, alloc_mr);
	SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
	SET_DEVICE_OP(dev_ops, alloc_mw);
	SET_DEVICE_OP(dev_ops, alloc_pd);
	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
	SET_DEVICE_OP(dev_ops, alloc_ucontext);
	SET_DEVICE_OP(dev_ops, alloc_xrcd);
	SET_DEVICE_OP(dev_ops, attach_mcast);
	SET_DEVICE_OP(dev_ops, check_mr_status);
	SET_DEVICE_OP(dev_ops, counter_alloc_stats);
	SET_DEVICE_OP(dev_ops, counter_bind_qp);
	SET_DEVICE_OP(dev_ops, counter_dealloc);
	SET_DEVICE_OP(dev_ops, counter_unbind_qp);
	SET_DEVICE_OP(dev_ops, counter_update_stats);
	SET_DEVICE_OP(dev_ops, create_ah);
	SET_DEVICE_OP(dev_ops, create_counters);
	SET_DEVICE_OP(dev_ops, create_cq);
	SET_DEVICE_OP(dev_ops, create_flow);
	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
	SET_DEVICE_OP(dev_ops, create_qp);
	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, create_srq);
	SET_DEVICE_OP(dev_ops, create_wq);
	SET_DEVICE_OP(dev_ops, dealloc_dm);
	SET_DEVICE_OP(dev_ops, dealloc_driver);
	SET_DEVICE_OP(dev_ops, dealloc_mw);
	SET_DEVICE_OP(dev_ops, dealloc_pd);
	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
	SET_DEVICE_OP(dev_ops, del_gid);
	SET_DEVICE_OP(dev_ops, dereg_mr);
	SET_DEVICE_OP(dev_ops, destroy_ah);
	SET_DEVICE_OP(dev_ops, destroy_counters);
	SET_DEVICE_OP(dev_ops, destroy_cq);
	SET_DEVICE_OP(dev_ops, destroy_flow);
	SET_DEVICE_OP(dev_ops, destroy_flow_action);
	SET_DEVICE_OP(dev_ops, destroy_qp);
	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, destroy_srq);
	SET_DEVICE_OP(dev_ops, destroy_wq);
	SET_DEVICE_OP(dev_ops, detach_mcast);
	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
	SET_DEVICE_OP(dev_ops, drain_rq);
	SET_DEVICE_OP(dev_ops, drain_sq);
	SET_DEVICE_OP(dev_ops, enable_driver);
	SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry);
	SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
	SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw);
	SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
	SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw);
	SET_DEVICE_OP(dev_ops, fill_res_qp_entry);
	SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw);
	SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
	SET_DEVICE_OP(dev_ops, get_dma_mr);
	SET_DEVICE_OP(dev_ops, get_hw_stats);
	SET_DEVICE_OP(dev_ops, get_link_layer);
	SET_DEVICE_OP(dev_ops, get_netdev);
	SET_DEVICE_OP(dev_ops, get_port_immutable);
	SET_DEVICE_OP(dev_ops, get_vector_affinity);
	SET_DEVICE_OP(dev_ops, get_vf_config);
	SET_DEVICE_OP(dev_ops, get_vf_guid);
	SET_DEVICE_OP(dev_ops, get_vf_stats);
	SET_DEVICE_OP(dev_ops, init_port);
	SET_DEVICE_OP(dev_ops, iw_accept);
	SET_DEVICE_OP(dev_ops, iw_add_ref);
	SET_DEVICE_OP(dev_ops, iw_connect);
	SET_DEVICE_OP(dev_ops, iw_create_listen);
	SET_DEVICE_OP(dev_ops, iw_destroy_listen);
	SET_DEVICE_OP(dev_ops, iw_get_qp);
	SET_DEVICE_OP(dev_ops, iw_reject);
	SET_DEVICE_OP(dev_ops, iw_rem_ref);
	SET_DEVICE_OP(dev_ops, map_mr_sg);
	SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
	SET_DEVICE_OP(dev_ops, mmap);
	SET_DEVICE_OP(dev_ops, mmap_free);
	SET_DEVICE_OP(dev_ops, modify_ah);
	SET_DEVICE_OP(dev_ops, modify_cq);
	SET_DEVICE_OP(dev_ops, modify_device);
	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
	SET_DEVICE_OP(dev_ops, modify_port);
	SET_DEVICE_OP(dev_ops, modify_qp);
	SET_DEVICE_OP(dev_ops, modify_srq);
	SET_DEVICE_OP(dev_ops, modify_wq);
	SET_DEVICE_OP(dev_ops, peek_cq);
	SET_DEVICE_OP(dev_ops, poll_cq);
	SET_DEVICE_OP(dev_ops, post_recv);
	SET_DEVICE_OP(dev_ops, post_send);
	SET_DEVICE_OP(dev_ops, post_srq_recv);
	SET_DEVICE_OP(dev_ops, process_mad);
	SET_DEVICE_OP(dev_ops, query_ah);
	SET_DEVICE_OP(dev_ops, query_device);
	SET_DEVICE_OP(dev_ops, query_gid);
	SET_DEVICE_OP(dev_ops, query_pkey);
	SET_DEVICE_OP(dev_ops, query_port);
	SET_DEVICE_OP(dev_ops, query_qp);
	SET_DEVICE_OP(dev_ops, query_srq);
	SET_DEVICE_OP(dev_ops, query_ucontext);
	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
	SET_DEVICE_OP(dev_ops, read_counters);
	SET_DEVICE_OP(dev_ops, reg_dm_mr);
	SET_DEVICE_OP(dev_ops, reg_user_mr);
	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
	SET_DEVICE_OP(dev_ops, req_notify_cq);
	SET_DEVICE_OP(dev_ops, rereg_user_mr);
	SET_DEVICE_OP(dev_ops, resize_cq);
	SET_DEVICE_OP(dev_ops, set_vf_guid);
	SET_DEVICE_OP(dev_ops, set_vf_link_state);

	SET_OBJ_SIZE(dev_ops, ib_ah);
	SET_OBJ_SIZE(dev_ops, ib_counters);
	SET_OBJ_SIZE(dev_ops, ib_cq);
	SET_OBJ_SIZE(dev_ops, ib_mw);
	SET_OBJ_SIZE(dev_ops, ib_pd);
	SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
	SET_OBJ_SIZE(dev_ops, ib_srq);
	SET_OBJ_SIZE(dev_ops, ib_ucontext);
	SET_OBJ_SIZE(dev_ops, ib_xrcd);
}
EXPORT_SYMBOL(ib_set_device_ops);
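
/*
 * Example (illustrative only): the usual driver pattern of a const ops table
 * merged in with ib_set_device_ops(). The op names come from the
 * SET_DEVICE_OP() list above; my_query_device()/my_query_port(), the abi
 * version and the driver_id placeholder are assumptions.
 *
 *	static const struct ib_device_ops my_dev_ops = {
 *		.owner = THIS_MODULE,
 *		// Placeholder: a real driver sets its own enum rdma_driver_id.
 *		.driver_id = RDMA_DRIVER_UNKNOWN,
 *		.uverbs_abi_ver = 1,
 *
 *		.query_device = my_query_device,
 *		.query_port = my_query_port,
 *	};
 *
 *	// During device setup, before registration:
 *	//	ib_set_device_ops(&mdev->ibdev, &my_dev_ops);
 */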

#ifdef CONFIG_INFINIBAND_VIRT_DMA
int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = (uintptr_t)sg_virt(s);
		sg_dma_len(s) = s->length;
	}
	return nents;
}
EXPORT_SYMBOL(ib_dma_virt_map_sg);
#endif /* CONFIG_INFINIBAND_VIRT_DMA */

static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ib_comp_unbound_wq =
		alloc_workqueue("ib-comp-unb-wq",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_unbound_wq) {
		ret = -ENOMEM;
		goto err_comp;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp_unbound;
	}

	rdma_nl_init();

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	ret = register_pernet_device(&rdma_dev_net_ops);
	if (ret) {
		pr_warn("Couldn't init compat dev. ret %d\n", ret);
		goto err_compat;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	roce_gid_mgmt_init();

	return 0;

err_compat:
	unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	class_unregister(&ib_class);
err_comp_unbound:
	destroy_workqueue(ib_comp_unbound_wq);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_pernet_device(&rdma_dev_net_ops);
	unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_unbound_wq);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
	flush_workqueue(system_unbound_wq);
	WARN_ON(!xa_empty(&clients));
	WARN_ON(!xa_empty(&devices));
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

/* ib core relies on netdev stack to first register net_ns_type_operations
 * ns kobject type before ib_core initialization.
 */
fs_initcall(ib_core_init);
module_exit(ib_core_cleanup);