core.c

// SPDX-License-Identifier: GPL-2.0+
// Copyright 2019 IBM Corp.
#include <linux/idr.h>
#include "ocxl_internal.h"

static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
{
	return (get_device(&fn->dev) == NULL) ? NULL : fn;
}

static void ocxl_fn_put(struct ocxl_fn *fn)
{
	put_device(&fn->dev);
}

static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu;

	afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
	if (!afu)
		return NULL;

	kref_init(&afu->kref);
	mutex_init(&afu->contexts_lock);
	mutex_init(&afu->afu_control_lock);
	idr_init(&afu->contexts_idr);
	afu->fn = fn;
	ocxl_fn_get(fn);
	return afu;
}

static void free_afu(struct kref *kref)
{
	struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);

	idr_destroy(&afu->contexts_idr);
	ocxl_fn_put(afu->fn);
	kfree(afu);
}
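
/*
 * AFU reference counting: alloc_afu() initialises the kref to one and
 * takes a reference on the owning function; free_afu() drops that
 * function reference when the last AFU reference goes away. The
 * get/put helpers below are exported for use by client drivers.
 */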

void ocxl_afu_get(struct ocxl_afu *afu)
{
	kref_get(&afu->kref);
}
EXPORT_SYMBOL_GPL(ocxl_afu_get);

void ocxl_afu_put(struct ocxl_afu *afu)
{
	kref_put(&afu->kref, free_afu);
}
EXPORT_SYMBOL_GPL(ocxl_afu_put);

static int assign_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int actag_count, actag_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * If the function did not get all the actags it requested,
	 * each AFU scales down its own count in the same proportion.
	 */
	actag_count = afu->config.actag_supported *
		fn->actag_enabled / fn->actag_supported;
	actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
	if (actag_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n",
			actag_count, actag_offset);
		return actag_offset;
	}
	afu->actag_base = fn->actag_base + actag_offset;
	afu->actag_enabled = actag_count;

	ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos,
				afu->actag_base, afu->actag_enabled);
	dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n",
		afu->actag_base, afu->actag_enabled);
	return 0;
}

static void reclaim_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->actag_base - fn->actag_base;
	size = afu->actag_enabled;
	ocxl_actag_afu_free(afu->fn, start_offset, size);
}

static int assign_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int pasid_count, pasid_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * We only support the case where the function configuration
	 * requested enough PASIDs to cover all AFUs.
	 */
	pasid_count = 1 << afu->config.pasid_supported_log;
	pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
	if (pasid_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n",
			pasid_count, pasid_offset);
		return pasid_offset;
	}
	afu->pasid_base = fn->pasid_base + pasid_offset;
	afu->pasid_count = 0;
	afu->pasid_max = pasid_count;

	ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos,
				afu->pasid_base,
				afu->config.pasid_supported_log);
	dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n",
		afu->pasid_base, pasid_count);
	return 0;
}

static void reclaim_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->pasid_base - fn->pasid_base;
	size = 1 << afu->config.pasid_supported_log;
	ocxl_pasid_afu_free(afu->fn, start_offset, size);
}
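
/*
 * The function BARs (0, 2 and 4) can be shared by several AFUs, so
 * each BAR is tracked with a per-BAR use count: the PCI region is
 * requested on the first reservation and released when the last user
 * drops it.
 */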

static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return -EINVAL;

	idx = bar >> 1;
	if (fn->bar_used[idx]++ == 0) {
		rc = pci_request_region(dev, bar, "ocxl");
		if (rc)
			return rc;
	}
	return 0;
}

static void release_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return;

	idx = bar >> 1;
	if (--fn->bar_used[idx] == 0)
		pci_release_region(dev, bar);
	WARN_ON(fn->bar_used[idx] < 0);
}
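
/*
 * Each AFU describes two MMIO areas in its configuration space: a
 * global area, which is ioremapped here, and a per-process area, for
 * which only the physical start address is recorded for later
 * per-context use.
 */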

static int map_mmio_areas(struct ocxl_afu *afu)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent);

	rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
	if (rc)
		return rc;

	rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	if (rc) {
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		return rc;
	}

	afu->global_mmio_start =
		pci_resource_start(pci_dev, afu->config.global_mmio_bar) +
		afu->config.global_mmio_offset;
	afu->pp_mmio_start =
		pci_resource_start(pci_dev, afu->config.pp_mmio_bar) +
		afu->config.pp_mmio_offset;

	afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
				afu->config.global_mmio_size);
	if (!afu->global_mmio_ptr) {
		release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		dev_err(&pci_dev->dev, "Error mapping global mmio area\n");
		return -ENOMEM;
	}

	/*
	 * Leave an empty page between the per-process mmio area and
	 * the AFU interrupt mappings
	 */
	afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
	return 0;
}

static void unmap_mmio_areas(struct ocxl_afu *afu)
{
	if (afu->global_mmio_ptr) {
		iounmap(afu->global_mmio_ptr);
		afu->global_mmio_ptr = NULL;
	}
	afu->global_mmio_start = 0;
	afu->pp_mmio_start = 0;
	release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	release_fn_bar(afu->fn, afu->config.global_mmio_bar);
}
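
/*
 * AFU setup order: read the AFU configuration from config space, then
 * assign actags, assign PASIDs, and map the MMIO areas. Each step is
 * undone in reverse order on failure, mirroring deconfigure_afu().
 */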

static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
{
	int rc;

	rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
	if (rc)
		return rc;

	rc = assign_afu_actag(afu);
	if (rc)
		return rc;

	rc = assign_afu_pasid(afu);
	if (rc)
		goto err_free_actag;

	rc = map_mmio_areas(afu);
	if (rc)
		goto err_free_pasid;

	return 0;

err_free_pasid:
	reclaim_afu_pasid(afu);
err_free_actag:
	reclaim_afu_actag(afu);
	return rc;
}

static void deconfigure_afu(struct ocxl_afu *afu)
{
	unmap_mmio_areas(afu);
	reclaim_afu_pasid(afu);
	reclaim_afu_actag(afu);
}

static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
{
	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);

	return 0;
}

static void deactivate_afu(struct ocxl_afu *afu)
{
	struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);

	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
}

static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
{
	int rc;
	struct ocxl_afu *afu;

	afu = alloc_afu(fn);
	if (!afu)
		return -ENOMEM;

	rc = configure_afu(afu, afu_idx, dev);
	if (rc) {
		ocxl_afu_put(afu);
		return rc;
	}

	rc = activate_afu(dev, afu);
	if (rc) {
		deconfigure_afu(afu);
		ocxl_afu_put(afu);
		return rc;
	}

	list_add_tail(&afu->list, &fn->afu_list);
	return 0;
}

static void remove_afu(struct ocxl_afu *afu)
{
	list_del(&afu->list);
	ocxl_context_detach_all(afu);
	deactivate_afu(afu);
	deconfigure_afu(afu);
	ocxl_afu_put(afu); // matches the implicit get in alloc_afu
}

static struct ocxl_fn *alloc_function(void)
{
	struct ocxl_fn *fn;

	fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
	if (!fn)
		return NULL;

	INIT_LIST_HEAD(&fn->afu_list);
	INIT_LIST_HEAD(&fn->pasid_list);
	INIT_LIST_HEAD(&fn->actag_list);

	return fn;
}

static void free_function(struct ocxl_fn *fn)
{
	WARN_ON(!list_empty(&fn->afu_list));
	WARN_ON(!list_empty(&fn->pasid_list));
	kfree(fn);
}
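
/*
 * The function is embedded in a struct device whose release callback
 * frees it, so its lifetime is tied to the device reference count:
 * once the device is registered, put_device()/device_unregister()
 * rather than free_function() is what ultimately frees the structure.
 */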

static void free_function_dev(struct device *dev)
{
	struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev);

	free_function(fn);
}

static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
{
	fn->dev.parent = &dev->dev;
	fn->dev.release = free_function_dev;
	return dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
}

static int assign_function_actag(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	u16 base, enabled, supported;
	int rc;

	rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
	if (rc)
		return rc;

	fn->actag_base = base;
	fn->actag_enabled = enabled;
	fn->actag_supported = supported;

	ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
			fn->actag_base, fn->actag_enabled);
	dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
		fn->actag_base, fn->actag_enabled);
	return 0;
}

static int set_function_pasid(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, desired_count, max_count;

	/* A function may not require any PASID */
	if (fn->config.max_pasid_log < 0)
		return 0;

	rc = ocxl_config_get_pasid_info(dev, &max_count);
	if (rc)
		return rc;

	desired_count = 1 << fn->config.max_pasid_log;

	if (desired_count > max_count) {
		dev_err(&fn->dev,
			"Function requires more PASIDs than is available (%d vs. %d)\n",
			desired_count, max_count);
		return -ENOSPC;
	}

	fn->pasid_base = 0;
	return 0;
}

static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
{
	int rc;

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
		return rc;
	}

	/*
	 * Once it has been confirmed to work on our hardware, we
	 * should reset the function, to force the adapter to restart
	 * from scratch.
	 * A function reset would also reset all its AFUs.
	 *
	 * Some hints for implementation:
	 *
	 * - there's no status bit to know when the reset is done. We
	 *   should try reading the config space to know when it's
	 *   done.
	 * - probably something like:
	 *   Reset
	 *   wait 100ms
	 *   issue config read
	 *   allow device up to 1 sec to return success on config
	 *   read before declaring it broken
	 *
	 * Some shared logic on the card (CFG, TLX) won't be reset, so
	 * there's no guarantee that it will be enough.
	 */
	rc = ocxl_config_read_function(dev, &fn->config);
	if (rc)
		return rc;

	rc = set_function_device(fn, dev);
	if (rc)
		return rc;

	rc = assign_function_actag(fn);
	if (rc)
		return rc;

	rc = set_function_pasid(fn);
	if (rc)
		return rc;

	rc = ocxl_link_setup(dev, 0, &fn->link);
	if (rc)
		return rc;

	rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
	if (rc) {
		ocxl_link_release(dev, fn->link);
		return rc;
	}
	return 0;
}

static void deconfigure_function(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);

	ocxl_link_release(dev, fn->link);
	pci_disable_device(dev);
}

static struct ocxl_fn *init_function(struct pci_dev *dev)
{
	struct ocxl_fn *fn;
	int rc;

	fn = alloc_function();
	if (!fn)
		return ERR_PTR(-ENOMEM);

	rc = configure_function(fn, dev);
	if (rc) {
		free_function(fn);
		return ERR_PTR(rc);
	}

	rc = device_register(&fn->dev);
	if (rc) {
		deconfigure_function(fn);
		put_device(&fn->dev);
		return ERR_PTR(rc);
	}
	return fn;
}

// Device detection & initialisation

struct ocxl_fn *ocxl_function_open(struct pci_dev *dev)
{
	int rc, afu_count = 0;
	u8 afu;
	struct ocxl_fn *fn;

	if (!radix_enabled()) {
		dev_err(&dev->dev, "Unsupported memory model (hash)\n");
		return ERR_PTR(-ENODEV);
	}

	fn = init_function(dev);
	if (IS_ERR(fn)) {
		dev_err(&dev->dev, "function init failed: %li\n",
			PTR_ERR(fn));
		return fn;
	}

	for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
		rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
		if (rc > 0) {
			rc = init_afu(dev, fn, afu);
			if (rc) {
				dev_err(&dev->dev,
					"Can't initialize AFU index %d\n", afu);
				continue;
			}
			afu_count++;
		}
	}
	dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
	return fn;
}
EXPORT_SYMBOL_GPL(ocxl_function_open);

struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn)
{
	return &fn->afu_list;
}
EXPORT_SYMBOL_GPL(ocxl_function_afu_list);

struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx)
{
	struct ocxl_afu *afu;

	list_for_each_entry(afu, &fn->afu_list, list) {
		if (afu->config.idx == afu_idx)
			return afu;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu);

const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn)
{
	return &fn->config;
}
EXPORT_SYMBOL_GPL(ocxl_function_config);

void ocxl_function_close(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu, *tmp;

	list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
		remove_afu(afu);
	}

	deconfigure_function(fn);
	device_unregister(&fn->dev);
}
EXPORT_SYMBOL_GPL(ocxl_function_close);

// AFU Metadata

struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu)
{
	return &afu->config;
}
EXPORT_SYMBOL_GPL(ocxl_afu_config);

void ocxl_afu_set_private(struct ocxl_afu *afu, void *private)
{
	afu->private = private;
}
EXPORT_SYMBOL_GPL(ocxl_afu_set_private);

void *ocxl_afu_get_private(struct ocxl_afu *afu)
{
	if (afu)
		return afu->private;

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_afu_get_private);
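
/*
 * Illustrative only: a minimal sketch of how a client driver might use
 * the exported API in this file from its own PCI probe/remove
 * callbacks. The names my_probe, my_remove and my_private_data are
 * hypothetical, and error handling is reduced to the bare minimum.
 *
 *	static int my_probe(struct pci_dev *dev, const struct pci_device_id *id)
 *	{
 *		struct ocxl_fn *fn;
 *		struct ocxl_afu *afu;
 *
 *		fn = ocxl_function_open(dev);
 *		if (IS_ERR(fn))
 *			return PTR_ERR(fn);
 *		pci_set_drvdata(dev, fn);
 *
 *		// Look up AFU index 0 and attach driver-private data to it
 *		afu = ocxl_function_fetch_afu(fn, 0);
 *		if (afu)
 *			ocxl_afu_set_private(afu, my_private_data);
 *
 *		return 0;
 *	}
 *
 *	static void my_remove(struct pci_dev *dev)
 *	{
 *		struct ocxl_fn *fn = pci_get_drvdata(dev);
 *
 *		ocxl_function_close(fn);
 *	}
 */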