// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
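
/* A TSB slot is found by hashing the virtual address: drop the low
 * bits (hash_shift is PAGE_SHIFT for the 8K TSB, REAL_HPAGE_SHIFT for
 * the huge TSB) and mask with the entry count, which is always a power
 * of two.  The tag stored in a slot is the virtual address shifted
 * down by 22 bits, which is what tag_compare() checks against.
 */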
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}
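
/* Invalidate kernel TSB entries by walking the whole table: an
 * approximation of each entry's virtual address is rebuilt from its
 * index (bits 13 and up) and its tag (bits 22 and up), and entries
 * that land in [start, end) get the invalid bit set in their tag.
 */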
static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
{
	unsigned long idx;

	for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
		struct tsb *ent = &swapper_tsb[idx];
		unsigned long match = idx << 13;

		match |= (ent->tag << 22);
		if (match >= start && match < end)
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
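/* If the range covers at least twice as many pages as the kernel TSB
 * has entries, every slot would be hit more than once by per-page
 * probing anyway, so a single linear scan of the table is the cheaper
 * option; otherwise probe the hash slot for each page in the range.
 */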
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
		return flush_tsb_kernel_range_scan(start, end);

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
				       unsigned long hash_shift,
				       unsigned long nentries,
				       unsigned int hugepage_shift)
{
	unsigned int hpage_entries;
	unsigned int i;

	hpage_entries = 1 << (hugepage_shift - hash_shift);
	for (i = 0; i < hpage_entries; i++)
		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
				      nentries);
}

static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
				 unsigned long tsb, unsigned long nentries,
				 unsigned int hugepage_shift)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
					   nentries, hugepage_shift);
}
#endif
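
/* Flush a batch of user virtual addresses from the mm's TSB(s).  The
 * batch's hugepage_shift selects which TSB is touched: mappings smaller
 * than REAL_HPAGE_SHIFT go through MM_TSB_BASE, larger ones through
 * MM_TSB_HUGE (if it exists).  On cheetah_plus and hypervisor chips the
 * TSB is addressed physically, so the base is converted with __pa()
 * first.
 */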
void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		if (tb->hugepage_shift == PAGE_SHIFT)
			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE)
		else
			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
					     tb->hugepage_shift);
#endif
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
				     tb->hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
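
/* Same TSB selection as flush_tsb_user(), but for a single virtual
 * address rather than a TLB flush batch.
 */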
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (hugepage_shift < REAL_HPAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		if (hugepage_shift == PAGE_SHIFT)
			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
					      nentries);
#if defined(CONFIG_HUGETLB_PAGE)
		else
			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
						   nentries, hugepage_shift);
#endif
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
					   nentries, hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#endif
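
/* Compute the per-TSB bookkeeping for a freshly allocated TSB of
 * tsb_bytes bytes: the entry count, the tsb_reg_val loaded into the
 * TSB register (size code in the low three bits, base address in the
 * rest), and, when the TSB is not physically addressed (tlb_type is
 * neither cheetah_plus nor hypervisor), the virtual base and the
 * locked-TLB-entry PTE used to map it.  On sun4v the hypervisor TSB
 * descriptor is filled in as well.
 */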
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
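
/* pgtable_cache backs page table pages; tsb_caches[] provides one
 * kmem cache per possible TSB size, 8K << 0 through 8K << 7 (1MB).
 * Each TSB cache aligns its objects to the object size, which
 * satisfies the requirement (checked in setup_tsb_params()) that a
 * TSB be naturally aligned to its size.
 */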
struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}
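
/* sysctl_tsb_ratio controls how full a TSB may get before it is grown.
 * A negative value sets the RSS limit below the entry count
 * (limit = nentries - (nentries >> -ratio)), a non-negative value sets
 * it above.  With the default of -2, an 8KB TSB (8192 bytes / 16 bytes
 * per entry = 512 entries) gets an RSS limit of 512 - 128 = 384,
 * i.e. we grow once the TSB is about 3/4 full.
 */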
int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size,
				     unsigned long page_size_shift);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
			 tsb_index == MM_TSB_BASE ?
			 PAGE_SHIFT : REAL_HPAGE_SHIFT);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}
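
/* Initialize the MMU context of a new address space.  The context
 * structure copied from the parent is reset here: the context number,
 * TSB pointers and ADI tag store are cleared, and fresh TSB(s) are
 * sized from the parent's current RSS so a forked child does not have
 * to grow back up from 8K one fault at a time.
 */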
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	unsigned long saved_hugetlb_pte_count;
	unsigned long saved_thp_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	mm->context.tag_store = NULL;
	spin_lock_init(&mm->context.tag_lock);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* We reset them to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
	saved_thp_pte_count = mm->context.thp_pte_count;
	mm->context.hugetlb_pte_count = 0;
	mm->context.thp_pte_count = 0;

	mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, mm_rss);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
		tsb_grow(mm, MM_TSB_HUGE,
			 (saved_hugetlb_pte_count + saved_thp_pte_count) *
			 REAL_HPAGE_PER_HPAGE);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}
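
/* Free one TSB.  The low three bits of tsb_reg_val hold the size code
 * set up in setup_tsb_params(), which is also the index into
 * tsb_caches[], so the TSB goes back to the cache it came from.
 */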
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}
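
/* Tear down the MMU state of an exiting address space: free its TSBs,
 * release its context number back to mmu_context_bmap, and free any
 * ADI tag storage attached to it.
 */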
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	/* If ADI tag storage was allocated for this task, free it */
	if (mm->context.tag_store) {
		tag_storage_desc_t *tag_desc;
		unsigned long max_desc;
		unsigned char *tags;

		tag_desc = mm->context.tag_store;
		max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
		for (i = 0; i < max_desc; i++) {
			tags = tag_desc->tags;
			tag_desc->tags = NULL;
			kfree(tags);
			tag_desc++;
		}
		kfree(mm->context.tag_store);
		mm->context.tag_store = NULL;
	}
}