// SPDX-License-Identifier: GPL-2.0
/*
 * I/O Address Space ID allocator. There is one global IOASID space, split into
 * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
 * free IOASIDs with ioasid_alloc and ioasid_free.
 */
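
/*
 * Illustrative usage sketch (not part of the original file): a minimal caller
 * that owns its own subset. The set name "example_ioasid_set", the range 1..31
 * and the "mm" private pointer are hypothetical.
 *
 *	DECLARE_IOASID_SET(example_ioasid_set);
 *
 *	ioasid_t pasid = ioasid_alloc(&example_ioasid_set, 1, 31, mm);
 *
 *	if (pasid == INVALID_IOASID)
 *		return -ENOSPC;
 *	...
 *	ioasid_free(pasid);
 */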

#include <linux/ioasid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct ioasid_data {
        ioasid_t id;
        struct ioasid_set *set;
        void *private;
        struct rcu_head rcu;
};

/*
 * struct ioasid_allocator_data - Internal data structure to hold information
 * about an allocator. There are two types of allocators:
 *
 * - Default allocator always has its own XArray to track the IOASIDs allocated.
 * - Custom allocators may share allocation helpers with different private data.
 *   Custom allocators that share the same helper functions also share the same
 *   XArray.
 * Rules:
 * 1. Default allocator is always available, not dynamically registered. This is
 *    to prevent race conditions with early boot code that wants to register
 *    custom allocators or allocate IOASIDs.
 * 2. Custom allocators take precedence over the default allocator.
 * 3. When all custom allocators sharing the same helper functions are
 *    unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
 *    freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
 * 4. When switching between custom allocators sharing the same helper
 *    functions, outstanding IOASIDs are preserved.
 * 5. When switching between a custom allocator and the default allocator, all
 *    IOASIDs must be freed to ensure unadulterated space for the new allocator.
 *
 * @ops:	allocator helper functions and their data
 * @list:	registered custom allocators
 * @slist:	allocators that share the same ops but different data
 * @flags:	attributes of the allocator
 * @xa:		xarray holding the IOASID space
 * @rcu:	used for kfree_rcu when unregistering the allocator
 */
struct ioasid_allocator_data {
        struct ioasid_allocator_ops *ops;
        struct list_head list;
        struct list_head slist;
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
        unsigned long flags;
        struct xarray xa;
        struct rcu_head rcu;
};

static DEFINE_SPINLOCK(ioasid_allocator_lock);
static LIST_HEAD(allocators_list);

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
        .alloc = default_alloc,
        .free = default_free,
};

static struct ioasid_allocator_data default_allocator = {
        .ops = &default_ops,
        .flags = 0,
        .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
};

static struct ioasid_allocator_data *active_allocator = &default_allocator;

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
        ioasid_t id;

        if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
                pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
                return INVALID_IOASID;
        }

        return id;
}

static void default_free(ioasid_t ioasid, void *opaque)
{
        struct ioasid_data *ioasid_data;

        ioasid_data = xa_erase(&default_allocator.xa, ioasid);
        kfree_rcu(ioasid_data, rcu);
}

/* Allocate and initialize a new custom allocator with its helper functions */
static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
{
        struct ioasid_allocator_data *ia_data;

        ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
        if (!ia_data)
                return NULL;

        xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
        INIT_LIST_HEAD(&ia_data->slist);
        ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
        ia_data->ops = ops;

        /* For tracking custom allocators that share the same ops */
        list_add_tail(&ops->list, &ia_data->slist);

        return ia_data;
}

static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
{
        return (a->free == b->free) && (a->alloc == b->alloc);
}

/**
 * ioasid_register_allocator - register a custom allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default xarray based allocator.
 * Private data associated with IOASIDs allocated by the custom allocators is
 * managed by the IOASID framework, just like data stored in the XArray by the
 * default allocator.
 *
 * There can be multiple allocators registered but only one is active. In case
 * of runtime removal of a custom allocator, the next one is activated based
 * on the registration ordering.
 *
 * Multiple allocators can share the same alloc() function, in which case the
 * IOASID space is shared.
 */
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
        struct ioasid_allocator_data *ia_data;
        struct ioasid_allocator_data *pallocator;
        int ret = 0;

        spin_lock(&ioasid_allocator_lock);

        ia_data = ioasid_alloc_allocator(ops);
        if (!ia_data) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        /*
         * No particular preference, we activate the first one and keep
         * the later registered allocators in a list in case the first one gets
         * removed due to hotplug.
         */
        if (list_empty(&allocators_list)) {
                WARN_ON(active_allocator != &default_allocator);
                /* Use this new allocator if default is not active */
                if (xa_empty(&active_allocator->xa)) {
                        rcu_assign_pointer(active_allocator, ia_data);
                        list_add_tail(&ia_data->list, &allocators_list);
                        goto out_unlock;
                }
                pr_warn("Default allocator active with outstanding IOASID\n");
                ret = -EAGAIN;
                goto out_free;
        }

        /* Check if the allocator is already registered */
        list_for_each_entry(pallocator, &allocators_list, list) {
                if (pallocator->ops == ops) {
                        pr_err("IOASID allocator already registered\n");
                        ret = -EEXIST;
                        goto out_free;
                } else if (use_same_ops(pallocator->ops, ops)) {
                        /*
                         * If the new allocator shares the same ops,
                         * then they will share the same IOASID space.
                         * We should put them under the same xarray.
                         */
                        list_add_tail(&ops->list, &pallocator->slist);
                        goto out_free;
                }
        }
        list_add_tail(&ia_data->list, &allocators_list);

        spin_unlock(&ioasid_allocator_lock);
        return 0;
out_free:
        kfree(ia_data);
out_unlock:
        spin_unlock(&ioasid_allocator_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
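
/*
 * Illustrative sketch of a custom allocator registration (hypothetical names,
 * not part of the original file). A paravirtualized IOMMU driver, for
 * instance, could forward allocation requests to the host through its own
 * alloc()/free() helpers and pass driver context via @pdata:
 *
 *	static ioasid_t my_vcmd_alloc(ioasid_t min, ioasid_t max, void *data)
 *	{
 *		struct my_iommu *iommu = data;
 *
 *		...	issue a virtual command, return the host-provided ID
 *			or INVALID_IOASID on failure
 *	}
 *
 *	static void my_vcmd_free(ioasid_t ioasid, void *data)
 *	{
 *		...
 *	}
 *
 *	static struct ioasid_allocator_ops my_vcmd_ops = {
 *		.alloc	= my_vcmd_alloc,
 *		.free	= my_vcmd_free,
 *		.pdata	= &my_iommu_instance,
 *	};
 *
 *	ret = ioasid_register_allocator(&my_vcmd_ops);
 *
 * Once registered, my_vcmd_ops takes precedence over the default XArray based
 * allocator for subsequent ioasid_alloc() calls.
 */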

/**
 * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
 * @ops: the custom allocator to be removed
 *
 * Remove an allocator from the list and activate the next allocator in
 * the order it was registered, or revert to the default allocator if all
 * custom allocators are unregistered without outstanding IOASIDs.
 */
void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
{
        struct ioasid_allocator_data *pallocator;
        struct ioasid_allocator_ops *sops;

        spin_lock(&ioasid_allocator_lock);
        if (list_empty(&allocators_list)) {
                pr_warn("No custom IOASID allocators active!\n");
                goto exit_unlock;
        }

        list_for_each_entry(pallocator, &allocators_list, list) {
                if (!use_same_ops(pallocator->ops, ops))
                        continue;

                if (list_is_singular(&pallocator->slist)) {
                        /* No shared helper functions */
                        list_del(&pallocator->list);
                        /*
                         * All IOASIDs should have been freed before
                         * the last allocator that shares the same ops
                         * is unregistered.
                         */
                        WARN_ON(!xa_empty(&pallocator->xa));
                        if (list_empty(&allocators_list)) {
                                pr_info("No custom IOASID allocators, switch to default.\n");
                                rcu_assign_pointer(active_allocator, &default_allocator);
                        } else if (pallocator == active_allocator) {
                                rcu_assign_pointer(active_allocator,
                                                   list_first_entry(&allocators_list,
                                                                    struct ioasid_allocator_data, list));
                                pr_info("IOASID allocator changed");
                        }
                        kfree_rcu(pallocator, rcu);
                        break;
                }
                /*
                 * Find the matching shared ops to delete,
                 * but keep outstanding IOASIDs
                 */
                list_for_each_entry(sops, &pallocator->slist, list) {
                        if (sops == ops) {
                                list_del(&ops->list);
                                break;
                        }
                }
                break;
        }
exit_unlock:
        spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
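
/*
 * Illustrative sketch (hypothetical names): the custom allocator registered
 * above would be removed on the corresponding hot-remove or driver teardown
 * path. Per rule 3 above, all IOASIDs allocated through these ops must have
 * been freed first, otherwise they are orphaned:
 *
 *	ioasid_unregister_allocator(&my_vcmd_ops);
 */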

/**
 * ioasid_set_data - Set private data for an allocated ioasid
 * @ioasid: the ID to set data for
 * @data: the private data
 *
 * For an IOASID that is already allocated, private data can be set via this
 * API. Future lookups can be done via ioasid_find().
 */
int ioasid_set_data(ioasid_t ioasid, void *data)
{
        struct ioasid_data *ioasid_data;
        int ret = 0;

        spin_lock(&ioasid_allocator_lock);
        ioasid_data = xa_load(&active_allocator->xa, ioasid);
        if (ioasid_data)
                rcu_assign_pointer(ioasid_data->private, data);
        else
                ret = -ENOENT;
        spin_unlock(&ioasid_allocator_lock);

        /*
         * Wait for readers to stop accessing the old private data, so the
         * caller can free it.
         */
        if (!ret)
                synchronize_rcu();

        return ret;
}
EXPORT_SYMBOL_GPL(ioasid_set_data);
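
/*
 * Illustrative sketch (hypothetical names): a driver that allocated an IOASID
 * with a placeholder pointer can attach richer context later, e.g. once an
 * SVM bind completes, relying on the synchronize_rcu() above before freeing
 * the old private data:
 *
 *	ret = ioasid_set_data(pasid, svm_ctx);
 *	if (ret)
 *		goto err_unbind;
 */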

/**
 * ioasid_alloc - Allocate an IOASID
 * @set: the IOASID set
 * @min: the minimum ID (inclusive)
 * @max: the maximum ID (inclusive)
 * @private: data private to the caller
 *
 * Allocate an ID between @min and @max. The @private pointer is stored
 * internally and can be retrieved with ioasid_find().
 *
 * Return: the allocated ID on success, or %INVALID_IOASID on failure.
 */
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
                      void *private)
{
        struct ioasid_data *data;
        void *adata;
        ioasid_t id;

        data = kzalloc(sizeof(*data), GFP_ATOMIC);
        if (!data)
                return INVALID_IOASID;

        data->set = set;
        data->private = private;

        /*
         * Custom allocator needs allocator data to perform platform specific
         * operations.
         */
        spin_lock(&ioasid_allocator_lock);
        adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
        id = active_allocator->ops->alloc(min, max, adata);
        if (id == INVALID_IOASID) {
                pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
                goto exit_free;
        }

        if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
            xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
                /* Custom allocator needs framework to store and track allocation results */
                pr_err("Failed to alloc ioasid from %d\n", id);
                active_allocator->ops->free(id, active_allocator->ops->pdata);
                goto exit_free;
        }
        data->id = id;

        spin_unlock(&ioasid_allocator_lock);
        return id;
exit_free:
        spin_unlock(&ioasid_allocator_lock);
        kfree(data);
        return INVALID_IOASID;
}
EXPORT_SYMBOL_GPL(ioasid_alloc);

/**
 * ioasid_free - Free an IOASID
 * @ioasid: the ID to remove
 */
void ioasid_free(ioasid_t ioasid)
{
        struct ioasid_data *ioasid_data;

        spin_lock(&ioasid_allocator_lock);
        ioasid_data = xa_load(&active_allocator->xa, ioasid);
        if (!ioasid_data) {
                pr_err("Trying to free unknown IOASID %u\n", ioasid);
                goto exit_unlock;
        }

        active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
        /* Custom allocator needs additional steps to free the xa element */
        if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
                ioasid_data = xa_erase(&active_allocator->xa, ioasid);
                kfree_rcu(ioasid_data, rcu);
        }

exit_unlock:
        spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_free);

/**
 * ioasid_find - Find IOASID data
 * @set: the IOASID set
 * @ioasid: the IOASID to find
 * @getter: function to call on the found object
 *
 * The optional getter function allows the caller to take a reference to the
 * found object under the rcu lock. The function can also check if the object
 * is still valid: if @getter returns false, then the object is invalid and
 * NULL is returned.
 *
 * If the IOASID exists, return the private pointer passed to ioasid_alloc.
 * Private data can be NULL if not set. Return an error if the IOASID is not
 * found, or if @set is not NULL and the IOASID does not belong to the set.
 */
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
                  bool (*getter)(void *))
{
        void *priv;
        struct ioasid_data *ioasid_data;
        struct ioasid_allocator_data *idata;

        rcu_read_lock();
        idata = rcu_dereference(active_allocator);
        ioasid_data = xa_load(&idata->xa, ioasid);
        if (!ioasid_data) {
                priv = ERR_PTR(-ENOENT);
                goto unlock;
        }
        if (set && ioasid_data->set != set) {
                /* data found but does not belong to the set */
                priv = ERR_PTR(-EACCES);
                goto unlock;
        }
        /* Now the IOASID and its set are verified, we can return the private data */
        priv = rcu_dereference(ioasid_data->private);
        if (getter && !getter(priv))
                priv = NULL;
unlock:
        rcu_read_unlock();
        return priv;
}
EXPORT_SYMBOL_GPL(ioasid_find);
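
/*
 * Illustrative sketch (hypothetical names): a getter that only succeeds if
 * the object is still live, taking a reference under the RCU read lock held
 * by ioasid_find():
 *
 *	static bool get_svm_ctx(void *priv)
 *	{
 *		struct svm_ctx *ctx = priv;
 *
 *		return refcount_inc_not_zero(&ctx->refs);
 *	}
 *
 *	ctx = ioasid_find(&example_ioasid_set, pasid, get_svm_ctx);
 *	if (IS_ERR_OR_NULL(ctx))
 *		return -EINVAL;
 */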

MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
MODULE_LICENSE("GPL");