drm_mm.c

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>
/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means it's easier to extend for
 * some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
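
/*
 * Illustrative usage sketch (not part of drm_mm itself): drivers typically
 * embed a struct drm_mm in their address-space object and a struct
 * drm_mm_node in each buffer object, then hand both to the allocator. All
 * names below (example_gtt, example_bo, ...) are hypothetical, the node is
 * assumed to be zero-initialized (e.g. via kzalloc() of the containing
 * object), and locking is the caller's responsibility as noted above.
 */
struct example_bo {
	struct drm_mm_node node;	/* tracks this object's range */
};

struct example_gtt {
	struct drm_mm mm;		/* the range allocator itself */
};

static void __maybe_unused example_gtt_init(struct example_gtt *gtt,
					    u64 start, u64 size)
{
	/* gtt->mm must be cleared to 0 before drm_mm_init() */
	drm_mm_init(&gtt->mm, start, size);
}

static int __maybe_unused example_bo_pin(struct example_gtt *gtt,
					 struct example_bo *bo, u64 size)
{
	/* Simplest form: best-fit search over the whole managed range. */
	return drm_mm_insert_node(&gtt->mm, &bo->node, size);
}

static void __maybe_unused example_bo_unpin(struct example_gtt *gtt,
					    struct example_bo *bo)
{
	drm_mm_remove_node(&bo->node);
}

static void __maybe_unused example_gtt_fini(struct example_gtt *gtt)
{
	/* All nodes must have been removed before takedown. */
	drm_mm_takedown(&gtt->mm);
}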
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ

#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif
#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}
static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}
static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}
static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}
/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */
#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
									\
	if (!entry || RB_EMPTY_NODE(node))				\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}
/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
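
/*
 * Illustrative sketch (not part of drm_mm): reserving a firmware-configured
 * scanout buffer as described in the overview. The address and size come
 * from hypothetical firmware data; @node is assumed to be zeroed (all fields
 * other than start, size and color must be 0 before reservation).
 */
static int __maybe_unused example_reserve_firmware_fb(struct drm_mm *mm,
						      struct drm_mm_node *node,
						      u64 fb_start, u64 fb_size)
{
	node->start = fb_start;
	node->size = fb_size;
	node->color = 0;	/* only relevant if mm->color_adjust is set */

	/* Fails with -ENOSPC if the range is no longer free. */
	return drm_mm_reserve_node(mm, node);
}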
static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
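
/*
 * Illustrative sketch (not part of drm_mm): a top-down, 64 KiB aligned
 * allocation restricted to the upper half of a hypothetical 4 GiB GTT. The
 * range and alignment values are made up for the example.
 */
static int __maybe_unused example_insert_high(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size)
{
	return drm_mm_insert_node_in_range(mm, node, size,
					   64 * 1024,	/* alignment: 64 KiB */
					   0,		/* color */
					   2ull << 30,	/* range_start: 2 GiB */
					   4ull << 30,	/* range_end: 4 GiB */
					   DRM_MM_INSERT_HIGH);
}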
static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply start selecting objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A minimal eviction loop built on these
 * interfaces is sketched below.
 */
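
/*
 * Illustrative sketch (not part of drm_mm): a minimal eviction loop built on
 * the scan interfaces, following the pattern described above. The object
 * structure, the LRU list and the actual unbinding are hypothetical; real
 * drivers (e.g. i915) wrap considerably more bookkeeping around this.
 */
struct example_obj {
	struct drm_mm_node node;
	struct list_head lru_link;	/* position on the driver's LRU */
	struct list_head evict_link;	/* temporary scan membership */
};

static int __maybe_unused example_evict_for(struct drm_mm *mm,
					    struct list_head *lru,
					    u64 size, u64 alignment)
{
	struct drm_mm_scan scan;
	struct example_obj *obj, *next;
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, alignment, 0, DRM_MM_INSERT_BEST);

	/* Feed candidates from the LRU until a suitable hole appears. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->evict_link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * list_add() prepends, so walking evict_list visits the candidates
	 * in exactly the reverse order they were added, as required by
	 * drm_mm_scan_remove_block(). Objects not selected for eviction are
	 * dropped from the list here.
	 */
	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
		if (!drm_mm_scan_remove_block(&scan, &obj->node))
			list_del(&obj->evict_link);
	}

	/* The scan is fully unwound; now it is safe to evict the winners. */
	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
		drm_mm_remove_node(&obj->node);
		list_del(&obj->evict_link);
	}

	return found ? 0 : -ENOSPC;
}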
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);
/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);
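
/*
 * Illustrative sketch (not part of drm_mm): after evicting the nodes selected
 * by the scan, keep evicting whatever drm_mm_scan_color_evict() reports until
 * the target hole is free of color conflicts. example_evict_node() stands in
 * for a hypothetical driver helper that unbinds the object owning @node and
 * ends up calling drm_mm_remove_node() on it.
 */
static void __maybe_unused
example_scan_color_cleanup(struct drm_mm_scan *scan,
			   void (*example_evict_node)(struct drm_mm_node *node))
{
	struct drm_mm_node *node;

	while ((node = drm_mm_scan_color_evict(scan)))
		example_evict_node(node);
}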
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);
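
/*
 * A worked example of the head_node trick above (numbers are arbitrary):
 * drm_mm_init(mm, 0x100000, 0x200000) sets head_node.start = 0x300000 and
 * head_node.size = -0x200000. The hole "following" the head node therefore
 * starts at head_node.start + head_node.size = 0x100000 (the u64 addition
 * wraps around) and, because the circular node_list contains only the head
 * node, ends at head_node.start = 0x300000. The initial hole thus covers the
 * entire managed range without a special case in add_hole()/rm_hole().
 */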
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
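
/*
 * Illustrative sketch (not part of drm_mm): dumping the allocator state into a
 * seq_file, e.g. from a driver's debugfs show callback. Assumes
 * <drm/drm_print.h> is available for drm_seq_file_printer(); taking the
 * driver's own lock around the dump is only hinted at here.
 */
static int __maybe_unused example_mm_show(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_printer p = drm_seq_file_printer(m);

	/* take the driver lock protecting @mm */
	drm_mm_print(mm, &p);
	/* drop the driver lock */

	return 0;
}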