/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

static LIST_HEAD(mad_agent_list);
/* Lock to protect mad_agent_list */
static DEFINE_SPINLOCK(mad_agent_list_lock);

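/* Find the pkey_index_qp_list entry for pp's pkey index on its port,
 * or return NULL if no QP has been listed under that pkey index yet.
 */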
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_data[pp->port_num].pkey_list_lock);
        list_for_each_entry(tmp_pkey, &dev->port_data[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock);

        return pkey;
}

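/* Read the cached PKey value and subnet prefix for the port and
 * pkey index described by pp.
 */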
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp, u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}

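/* Ask the LSM whether this QP, and every QP sharing its security
 * structure, may use the given PKey on the given subnet.
 */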
static int enforce_qp_pkey_security(u16 pkey, u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec, &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix, pkey);
                if (ret)
                        return ret;
        }
        return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main, &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey, subnet_prefix, sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt, &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey, subnet_prefix, sec);
        }

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp, &attr, IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event, sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec, &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event, qp->qp_context);
                }
        }
}

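/* Re-check every QP listed under this pkey index after a cache
 * change.  QPs that fail the security check are collected on a
 * local list and sent to the error state outside qp_list_lock.
 */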
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device, port_num, pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val, subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list, &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp, tmp_pp, &to_error_list, to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_data[port_num].pkey_list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_data[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_data[port_num].pkey_list);
                }
                spin_unlock(&dev->port_data[port_num].pkey_list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, e.g. when
         * a destroy has failed.
         */
        pp->state = IB_PORT_PKEY_VALID;
}

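/* Free the security structure along with the port/pkey settings
 * it owns.
 */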
static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & IB_QP_PORT)
                new_pps->main.port_num = qp_attr->port_num;
        else if (qp_pps)
                new_pps->main.port_num = qp_pps->main.port_num;

        if (qp_attr_mask & IB_QP_PKEY_INDEX)
                new_pps->main.pkey_index = qp_attr->pkey_index;
        else if (qp_pps)
                new_pps->main.pkey_index = qp_pps->main.pkey_index;

        if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
             (qp_attr_mask & IB_QP_PORT)) ||
            (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
                new_pps->main.state = IB_PORT_PKEY_VALID;

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}

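/* Set up security for a new handle (qp) opened on an existing real
 * QP and check it against the real QP's current port/pkey settings
 * before linking it into the real QP's shared_qp_list.
 */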
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);
        if (ret)
                return ret;

        if (!qp->qp_sec)
                return 0;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);
        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}

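/* Allocate and initialize the security structure for a new QP.
 * Devices with no IB ports skip security tracking entirely and
 * leave qp->qp_sec NULL.
 */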
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        unsigned int i;
        bool is_ib = false;
        int ret;

        rdma_for_each_port(dev, i) {
                is_ib = rdma_protocol_ib(dev, i);
                if (is_ib)
                        break;
        }

        /* If this isn't an IB device don't create the security context */
        if (!is_ib)
                return 0;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

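/* QP destruction uses a begin/abort/end protocol: _begin unlists
 * the port/pkey settings and marks the structure as destroying,
 * _abort relists and re-checks them if the destroy failed, and
 * _end frees the structure once the destroy has succeeded.
 */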
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        /* Return if not IB */
        if (!sec)
                return;

        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already on one or more of those lists
         * the destroying flag will ensure the to_error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these settings were already listed
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the to_error flow before destroying it because
         * the to_error_list is in use.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}

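/* Called when the PKey cache for a port changes.  Walk every pkey
 * index listed on the port and re-check the QPs using it.
 */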
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey, &device->port_data[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey, device, port_num, subnet_prefix);
        }
}

void ib_security_release_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        unsigned int i;

        rdma_for_each_port(device, i) {
                list_for_each_entry_safe(pkey, tmp_pkey,
                                         &device->port_data[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
        }
}

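/* Security-checking wrapper around the device's modify_qp op.
 * When the port, pkey index, or alternate path changes, the new
 * settings are listed and checked before the modify is performed.
 */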
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
                   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
                   !real_qp->qp_sec),
                  "%s: QP security is not initialized for IB QP: %d\n",
                  __func__, real_qp->qp_num);

        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
        if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp, qp_attr, qp_attr_mask);
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);
                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);
                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->ops.modify_qp(real_qp,
                                                     qp_attr,
                                                     qp_attr_mask,
                                                     udata);

        if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }
                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }
        return ret;
}

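/* Check LSM permission for the PKey at pkey_index on port_num,
 * using the cached PKey table and subnet prefix.
 */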
static int ib_security_pkey_access(struct ib_device *dev,
                                   u8 port_num,
                                   u16 pkey_index,
                                   void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        if (!rdma_protocol_ib(dev, port_num))
                return 0;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

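/* Intended to run when the LSM policy changes; re-evaluates
 * whether each registered SMI MAD agent may still manage the
 * subnet and updates its smp_allowed flag accordingly.
 */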
void ib_mad_agent_security_change(void)
{
        struct ib_mad_agent *ag;

        spin_lock(&mad_agent_list_lock);
        list_for_each_entry(ag, &mad_agent_list, mad_agent_sec_list)
                WRITE_ONCE(ag->smp_allowed,
                           !security_ib_endport_manage_subnet(ag->security,
                                   dev_name(&ag->device->dev), ag->port_num));
        spin_unlock(&mad_agent_list_lock);
}

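/* Allocate the security context for a new MAD agent.  SMI agents
 * are additionally checked for subnet management permission and
 * tracked on mad_agent_list so policy changes can revoke access.
 */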
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return 0;

        INIT_LIST_HEAD(&agent->mad_agent_sec_list);

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        spin_lock(&mad_agent_list_lock);
        ret = security_ib_endport_manage_subnet(agent->security,
                                                dev_name(&agent->device->dev),
                                                agent->port_num);
        if (ret)
                goto free_security;

        WRITE_ONCE(agent->smp_allowed, true);
        list_add(&agent->mad_agent_sec_list, &mad_agent_list);
        spin_unlock(&mad_agent_list_lock);
        return 0;

free_security:
        spin_unlock(&mad_agent_list_lock);
        security_ib_free_security(agent->security);
        return ret;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return;

        if (agent->qp->qp_type == IB_QPT_SMI) {
                spin_lock(&mad_agent_list_lock);
                list_del(&agent->mad_agent_sec_list);
                spin_unlock(&mad_agent_list_lock);
        }

        security_ib_free_security(agent->security);
}

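/* Enforce MAD agent security: SMI agents are gated by the
 * smp_allowed flag; all other agents require a PKey access check.
 */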
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                return 0;

        if (map->agent.qp->qp_type == IB_QPT_SMI) {
                if (!READ_ONCE(map->agent.smp_allowed))
                        return -EACCES;
                return 0;
        }

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}