nouveau_svm.c

/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
struct nouveau_svm {
	struct nouveau_drm *drm;
	struct mutex mutex;
	struct list_head inst;

	struct nouveau_svm_fault_buffer {
		int id;
		struct nvif_object object;
		u32 entries;
		u32 getaddr;
		u32 putaddr;
		u32 get;
		u32 put;
		struct nvif_notify notify;

		struct nouveau_svm_fault {
			u64 inst;
			u64 addr;
			u64 time;
			u32 engine;
			u8  gpc;
			u8  hub;
			u8  access;
			u8  client;
			u8  fault;
			struct nouveau_svmm *svmm;
		} **fault;
		int fault_nr;
	} buffer[1];
};

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
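/* Packed NVIF ioctl used to push CPU page mappings into the GPU VMM:
 * ioctl header, method header, then the PFNMAP method arguments.
 */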
struct nouveau_pfnmap_args {
	struct nvif_ioctl_v0 i;
	struct nvif_ioctl_mthd_v0 m;
	struct nvif_vmm_pfnmap_v0 p;
};

struct nouveau_ivmm {
	struct nouveau_svmm *svmm;
	u64 inst;
	struct list_head head;
};
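/* Look up the SVMM linked to a channel instance pointer; callers hold
 * svm->mutex to keep the list stable.
 */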
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	list_for_each_entry(ivmm, &svm->inst, head) {
		if (ivmm->inst == inst)
			return ivmm;
	}
	return NULL;
}

#define SVMM_DBG(s,f,a...) \
	NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...) \
	NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
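/* DRM_NOUVEAU_SVM_BIND ioctl: best-effort migration of a user address
 * range into GPU VRAM.  Only the MIGRATE command and GPU_VRAM target
 * are currently supported.
 */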
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_svm_bind *args = data;
	unsigned target, cmd, priority;
	unsigned long addr, end;
	struct mm_struct *mm;

	args->va_start &= PAGE_MASK;
	args->va_end = ALIGN(args->va_end, PAGE_SIZE);

	/* Sanity check arguments */
	if (args->reserved0 || args->reserved1)
		return -EINVAL;
	if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
		return -EINVAL;
	if (args->va_start >= args->va_end)
		return -EINVAL;

	cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
	cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
	switch (cmd) {
	case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
		break;
	default:
		return -EINVAL;
	}

	priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
	priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

	/* FIXME: support CPU targets, i.e. all target values < GPU_VRAM. */
	target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
	target &= NOUVEAU_SVM_BIND_TARGET_MASK;
	switch (target) {
	case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * FIXME: For now refuse a non-zero stride; the migrate kernel
	 * function needs to learn about strides first, to avoid creating
	 * a mess within each device driver.
	 */
	if (args->stride)
		return -EINVAL;

	/*
	 * OK, we are asked to do something sane.  For now we only support
	 * the migrate command, but we will add things like memory policy
	 * (what to do on page fault) and maybe some other commands later.
	 */
	mm = get_task_mm(current);
	if (!mm) {
		return -EINVAL;
	}
	mmap_read_lock(mm);

	if (!cli->svm.svmm) {
		mmap_read_unlock(mm);
		mmput(mm);
		return -EINVAL;
	}

	for (addr = args->va_start, end = args->va_end; addr < end;) {
		struct vm_area_struct *vma;
		unsigned long next;

		vma = find_vma_intersection(mm, addr, end);
		if (!vma)
			break;

		addr = max(addr, vma->vm_start);
		next = min(vma->vm_end, end);
		/* This is a best effort so we ignore errors */
		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
					 next);
		addr = next;
	}

	/*
	 * FIXME: Return the number of pages we have migrated.  The migrate
	 * API needs to be updated to return that information so that we can
	 * report it to user space.
	 */
	args->result = 0;

	mmap_read_unlock(mm);
	mmput(mm);

	return 0;
}
/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
		if (ivmm) {
			list_del(&ivmm->head);
			kfree(ivmm);
		}
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
}

/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
			return -ENOMEM;
		ivmm->svmm = svmm;
		ivmm->inst = inst;

		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
	return 0;
}
/* Invalidate SVMM address-range on GPU. */
void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		bool super = svmm->vmm->vmm.object.client->super;
		svmm->vmm->vmm.object.client->super = true;
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
		svmm->vmm->vmm.object.client->super = super;
	}
}
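/* MMU notifier: unmap any GPU mappings covering a CPU address range
 * that is about to be invalidated.
 */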
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *update)
{
	struct nouveau_svmm *svmm =
		container_of(mn, struct nouveau_svmm, notifier);
	unsigned long start = update->start;
	unsigned long limit = update->end;

	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;

	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

	mutex_lock(&svmm->mutex);
	if (unlikely(!svmm->vmm))
		goto out;

	/*
	 * Ignore invalidation callbacks for device private pages since
	 * the invalidation is handled as part of the migration process.
	 */
	if (update->event == MMU_NOTIFY_MIGRATE &&
	    update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
		goto out;

	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start) {
			nouveau_svmm_invalidate(svmm, start,
						svmm->unmanaged.limit);
		}
		start = svmm->unmanaged.limit;
	}

	nouveau_svmm_invalidate(svmm, start, limit);

out:
	mutex_unlock(&svmm->mutex);
	return 0;
}

static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct nouveau_svmm, notifier));
}

static const struct mmu_notifier_ops nouveau_mn_ops = {
	.invalidate_range_start = nouveau_svmm_invalidate_range_start,
	.free_notifier = nouveau_svmm_free_notifier,
};
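/* Detach the SVMM from its VMM; the structure itself is freed later by
 * mmu_notifier_put() via the free_notifier callback.
 */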
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;

	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;
		mutex_unlock(&svmm->mutex);
		mmu_notifier_put(&svmm->notifier);
		*psvmm = NULL;
	}
}
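/* DRM_NOUVEAU_SVM_INIT ioctl: replace the client's VMM with one that
 * supports replayable faults, and register an MMU notifier so CPU-side
 * unmaps are mirrored to the GPU.
 */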
int
nouveau_svmm_init(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_svmm *svmm;
	struct drm_nouveau_svm_init *args = data;
	int ret;

	/* We need to fail if svm is disabled */
	if (!cli->drm->svm)
		return -ENOSYS;

	/* Allocate tracking for SVM-enabled VMM. */
	if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
		return -ENOMEM;
	svmm->vmm = &cli->svm;
	svmm->unmanaged.start = args->unmanaged_addr;
	svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
	mutex_init(&svmm->mutex);

	/* Check that SVM isn't already enabled for the client. */
	mutex_lock(&cli->mutex);
	if (cli->svm.cli) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Allocate a new GPU VMM that can support SVM (managed by the
	 * client, with replayable faults enabled).
	 *
	 * All future channel/memory allocations will make use of this
	 * VMM instead of the standard one.
	 */
	ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
			    cli->vmm.vmm.object.oclass, true,
			    args->unmanaged_addr, args->unmanaged_size,
			    &(struct gp100_vmm_v0) {
				.fault_replay = true,
			    }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
	if (ret)
		goto out_free;

	mmap_write_lock(current->mm);
	svmm->notifier.ops = &nouveau_mn_ops;
	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
	if (ret)
		goto out_mm_unlock;
	/* Note, ownership of svmm transfers to mmu_notifier */

	cli->svm.svmm = svmm;
	cli->svm.cli = cli;
	mmap_write_unlock(current->mm);
	mutex_unlock(&cli->mutex);
	return 0;

out_mm_unlock:
	mmap_write_unlock(current->mm);
out_free:
	mutex_unlock(&cli->mutex);
	kfree(svmm);
	return ret;
}
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
	SVM_DBG(svm, "replay");
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_REPLAY,
				 &(struct gp100_vmm_fault_replay_vn) {},
				 sizeof(struct gp100_vmm_fault_replay_vn)));
}

/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
			 u64 inst, u8 hub, u8 gpc, u8 client)
{
	SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_CANCEL,
				 &(struct gp100_vmm_fault_cancel_v0) {
					.hub = hub,
					.gpc = gpc,
					.client = client,
					.inst = inst,
				 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
			       struct nouveau_svm_fault *fault)
{
	nouveau_svm_fault_cancel(svm, fault->inst,
				      fault->hub,
				      fault->gpc,
				      fault->client);
}
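/* Sort order: instance pointer, then address, then access type with
 * WRITE-type faults before READ/PREFETCH, so duplicate addresses can
 * be skipped when the sorted list is processed.
 */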
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
	const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
	const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
	int ret;
	if ((ret = (s64)fa->inst - fb->inst))
		return ret;
	if ((ret = (s64)fa->addr - fb->addr))
		return ret;
	/*XXX: atomic? */
	return (fa->access == 0 || fa->access == 3) -
	       (fb->access == 0 || fb->access == 3);
}
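/* Decode one hardware fault-buffer entry at @offset into the software
 * cache, clearing the entry's valid bit so the slot can be reused.
 */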
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
			struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
	struct nvif_object *memory = &buffer->object;
	const u32 instlo = nvif_rd32(memory, offset + 0x00);
	const u32 insthi = nvif_rd32(memory, offset + 0x04);
	const u32 addrlo = nvif_rd32(memory, offset + 0x08);
	const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
	const u32 timelo = nvif_rd32(memory, offset + 0x10);
	const u32 timehi = nvif_rd32(memory, offset + 0x14);
	const u32 engine = nvif_rd32(memory, offset + 0x18);
	const u32   info = nvif_rd32(memory, offset + 0x1c);
	const u64   inst = (u64)insthi << 32 | instlo;
	const u8     gpc = (info & 0x1f000000) >> 24;
	const u8     hub = (info & 0x00100000) >> 20;
	const u8  client = (info & 0x00007f00) >> 8;
	struct nouveau_svm_fault *fault;

	/* XXX: I think we're supposed to spin waiting here. */
	if (WARN_ON(!(info & 0x80000000)))
		return;

	nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

	if (!buffer->fault[buffer->fault_nr]) {
		fault = kmalloc(sizeof(*fault), GFP_KERNEL);
		if (WARN_ON(!fault)) {
			nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
			return;
		}
		buffer->fault[buffer->fault_nr] = fault;
	}

	fault = buffer->fault[buffer->fault_nr++];
	fault->inst   = inst;
	fault->addr   = (u64)addrhi << 32 | addrlo;
	fault->time   = (u64)timehi << 32 | timelo;
	fault->engine = engine;
	fault->gpc    = gpc;
	fault->hub    = hub;
	fault->access = (info & 0x000f0000) >> 16;
	fault->client = client;
	fault->fault  = (info & 0x0000001f);

	SVM_DBG(svm, "fault %016llx %016llx %02x",
		fault->inst, fault->addr, fault->access);
}
struct svm_notifier {
	struct mmu_interval_notifier notifier;
	struct nouveau_svmm *svmm;
};

static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
					 const struct mmu_notifier_range *range,
					 unsigned long cur_seq)
{
	struct svm_notifier *sn =
		container_of(mni, struct svm_notifier, notifier);

	/*
	 * serializes the update to mni->invalidate_seq done by caller and
	 * prevents invalidation of the PTE from progressing while HW is being
	 * programmed. This is very hacky and only works because the normal
	 * notifier that does invalidation is always called after the range
	 * notifier.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&sn->svmm->mutex);
	else if (!mutex_trylock(&sn->svmm->mutex))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&sn->svmm->mutex);
	return true;
}

static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
	.invalidate = nouveau_svm_range_invalidate,
};
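/* Translate the hmm_pfn produced by hmm_range_fault() into the
 * NVIF_VMM_PFNMAP_V0 encoding expected by the GPU VMM, e.g. a writable
 * system page becomes:
 *   page_to_phys(page) | NVIF_VMM_PFNMAP_V0_V |
 *   NVIF_VMM_PFNMAP_V0_HOST | NVIF_VMM_PFNMAP_V0_W
 */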
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
				    struct hmm_range *range,
				    struct nouveau_pfnmap_args *args)
{
	struct page *page;

	/*
	 * The address prepared here is passed through nvif_object_ioctl()
	 * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
	 *
	 * This is all just encoding the internal hmm representation into a
	 * different nouveau internal representation.
	 */
	if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
		args->p.phys[0] = 0;
		return;
	}

	page = hmm_pfn_to_page(range->hmm_pfns[0]);
	/*
	 * Only map compound pages to the GPU if the CPU is also mapping the
	 * page as a compound page. Otherwise, the PTE protections might not be
	 * consistent (e.g., CPU only maps part of a compound page).
	 * Note that the underlying page might still be larger than the
	 * CPU mapping (e.g., a PUD sized compound page partially mapped with
	 * a PMD sized page table entry).
	 */
	if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
		unsigned long addr = args->p.addr;

		args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
				PAGE_SHIFT;
		args->p.size = 1UL << args->p.page;
		args->p.addr &= ~(args->p.size - 1);
		page -= (addr - args->p.addr) >> PAGE_SHIFT;
	}

	if (is_device_private_page(page))
		args->p.phys[0] = nouveau_dmem_page_addr(page) |
				NVIF_VMM_PFNMAP_V0_V |
				NVIF_VMM_PFNMAP_V0_VRAM;
	else
		args->p.phys[0] = page_to_phys(page) |
				NVIF_VMM_PFNMAP_V0_V |
				NVIF_VMM_PFNMAP_V0_HOST;
	if (range->hmm_pfns[0] & HMM_PFN_WRITE)
		args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}
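/* Fault a single page with hmm_range_fault() and push the resulting
 * mapping to the GPU VMM, retrying until the mapping is installed
 * without racing an invalidation (or the timeout expires).
 */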
static int nouveau_range_fault(struct nouveau_svmm *svmm,
			       struct nouveau_drm *drm,
			       struct nouveau_pfnmap_args *args, u32 size,
			       unsigned long hmm_flags,
			       struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	/* Have HMM fault pages within the fault window to the GPU. */
	unsigned long hmm_pfns[1];
	struct hmm_range range = {
		.notifier = &notifier->notifier,
		.start = notifier->notifier.interval_tree.start,
		.end = notifier->notifier.interval_tree.last + 1,
		.default_flags = hmm_flags,
		.hmm_pfns = hmm_pfns,
		.dev_private_owner = drm->dev,
	};
	struct mm_struct *mm = notifier->notifier.mm;
	int ret;

	while (true) {
		if (time_after(jiffies, timeout))
			return -EBUSY;

		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;
			return ret;
		}

		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;
		}
		break;
	}

	nouveau_hmm_convert_pfn(drm, &range, args);

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	svmm->vmm->vmm.object.client->super = false;
	mutex_unlock(&svmm->mutex);

	return ret;
}
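/* nvif notify handler: drain the hardware fault buffer, resolve each
 * fault through HMM, then tell the GPU to replay the faulting accesses.
 */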
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
	struct nouveau_svm_fault_buffer *buffer =
		container_of(notify, typeof(*buffer), notify);
	struct nouveau_svm *svm =
		container_of(buffer, typeof(*svm), buffer[buffer->id]);
	struct nvif_object *device = &svm->drm->client.device.object;
	struct nouveau_svmm *svmm;
	struct {
		struct nouveau_pfnmap_args i;
		u64 phys[1];
	} args;
	unsigned long hmm_flags;
	u64 inst, start, limit;
	int fi, fn;
	int replay = 0, ret;

	/* Parse available fault buffer entries into a cache, and update
	 * the GET pointer so HW can reuse the entries.
	 */
	SVM_DBG(svm, "fault handler");
	if (buffer->get == buffer->put) {
		buffer->put = nvif_rd32(device, buffer->putaddr);
		buffer->get = nvif_rd32(device, buffer->getaddr);
		if (buffer->get == buffer->put)
			return NVIF_NOTIFY_KEEP;
	}
	buffer->fault_nr = 0;

	SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
	while (buffer->get != buffer->put) {
		nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
		if (++buffer->get == buffer->entries)
			buffer->get = 0;
	}
	nvif_wr32(device, buffer->getaddr, buffer->get);
	SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

	/* Sort parsed faults by instance pointer to prevent unnecessary
	 * instance to SVMM translations, followed by address and access
	 * type to reduce the amount of work when handling the faults.
	 */
	sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
	     nouveau_svm_fault_cmp, NULL);

	/* Lookup SVMM structure for each unique instance pointer. */
	mutex_lock(&svm->mutex);
	for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
		if (!svmm || buffer->fault[fi]->inst != inst) {
			struct nouveau_ivmm *ivmm =
				nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
			svmm = ivmm ? ivmm->svmm : NULL;
			inst = buffer->fault[fi]->inst;
			SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
		}
		buffer->fault[fi]->svmm = svmm;
	}
	mutex_unlock(&svm->mutex);

	/* Process list of faults. */
	args.i.i.version = 0;
	args.i.i.type = NVIF_IOCTL_V0_MTHD;
	args.i.m.version = 0;
	args.i.m.method = NVIF_VMM_V0_PFNMAP;
	args.i.p.version = 0;

	for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
		struct svm_notifier notifier;
		struct mm_struct *mm;

		/* Cancel any faults from non-SVM channels. */
		if (!(svmm = buffer->fault[fi]->svmm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}
		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

		/* We try and group handling of faults within a small
		 * window into a single update.
		 */
		start = buffer->fault[fi]->addr;
		limit = start + PAGE_SIZE;
		if (start < svmm->unmanaged.limit)
			limit = min_t(u64, limit, svmm->unmanaged.start);

		/*
		 * Prepare the GPU-side update of all pages within the
		 * fault window, determining required pages and access
		 * permissions based on pending faults.
		 */
		args.i.p.addr = start;
		args.i.p.page = PAGE_SHIFT;
		args.i.p.size = PAGE_SIZE;
		/*
		 * Determine required permissions based on GPU fault
		 * access flags.
		 * XXX: atomic?
		 */
		switch (buffer->fault[fi]->access) {
		case 0: /* READ. */
			hmm_flags = HMM_PFN_REQ_FAULT;
			break;
		case 3: /* PREFETCH. */
			hmm_flags = 0;
			break;
		default:
			hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
			break;
		}

		mm = svmm->notifier.mm;
		if (!mmget_not_zero(mm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}

		notifier.svmm = svmm;
		ret = mmu_interval_notifier_insert(&notifier.notifier, mm,
						   args.i.p.addr, args.i.p.size,
						   &nouveau_svm_mni_ops);
		if (!ret) {
			ret = nouveau_range_fault(svmm, svm->drm, &args.i,
				sizeof(args), hmm_flags, &notifier);
			mmu_interval_notifier_remove(&notifier.notifier);
		}
		mmput(mm);

		limit = args.i.p.addr + args.i.p.size;
		for (fn = fi; ++fn < buffer->fault_nr; ) {
			/* It's okay to skip over duplicate addresses from the
			 * same SVMM as faults are ordered by access type such
			 * that only the first one needs to be handled.
			 *
			 * ie. WRITE faults appear first, thus any handling of
			 * pending READ faults will already be satisfied.
			 * But if a large page is mapped, make sure subsequent
			 * fault addresses have sufficient access permission.
			 */
			if (buffer->fault[fn]->svmm != svmm ||
			    buffer->fault[fn]->addr >= limit ||
			    (buffer->fault[fi]->access == 0 /* READ. */ &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
			    (buffer->fault[fi]->access != 0 /* READ. */ &&
			     buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)))
				break;
		}

		/* If handling failed completely, cancel all faults. */
		if (ret) {
			while (fi < fn) {
				struct nouveau_svm_fault *fault =
					buffer->fault[fi++];

				nouveau_svm_fault_cancel_fault(svm, fault);
			}
		} else
			replay++;
	}

	/* Issue fault replay to the GPU. */
	if (replay)
		nouveau_svm_fault_replay(svm);
	return NVIF_NOTIFY_KEEP;
}
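/* nouveau_pfns_alloc()/map()/free() are used by nouveau_dmem to install
 * GPU mappings for pages it has migrated to VRAM; roughly:
 *
 *	pfns = nouveau_pfns_alloc(npages);
 *	... fill pfns[i] with NVIF_VMM_PFNMAP_V0_* encoded addresses ...
 *	nouveau_pfns_map(svmm, mm, addr, pfns, npages);
 *	nouveau_pfns_free(pfns);
 */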
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
	return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}

u64 *
nouveau_pfns_alloc(unsigned long npages)
{
	struct nouveau_pfnmap_args *args;

	args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
	if (!args)
		return NULL;

	args->i.type = NVIF_IOCTL_V0_MTHD;
	args->m.method = NVIF_VMM_V0_PFNMAP;
	args->p.page = PAGE_SHIFT;

	return args->p.phys;
}

void
nouveau_pfns_free(u64 *pfns)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

	kfree(args);
}

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
		 unsigned long addr, u64 *pfns, unsigned long npages)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
	int ret;

	args->p.addr = addr;
	args->p.size = npages << PAGE_SHIFT;

	mutex_lock(&svmm->mutex);

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
				npages * sizeof(args->p.phys[0]), NULL);
	svmm->vmm->vmm.object.client->super = false;

	mutex_unlock(&svmm->mutex);
}
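/* Stop (fini) and restart (init) fault buffer processing; used across
 * suspend/resume, while the ctor/dtor below manage the buffer's lifetime.
 */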
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];

	nvif_notify_put(&buffer->notify);
}

static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nvif_object *device = &svm->drm->client.device.object;

	buffer->get = nvif_rd32(device, buffer->getaddr);
	buffer->put = nvif_rd32(device, buffer->putaddr);
	SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);

	return nvif_notify_get(&buffer->notify);
}
static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	int i;

	if (buffer->fault) {
		for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
			kfree(buffer->fault[i]);
		kvfree(buffer->fault);
	}

	nouveau_svm_fault_buffer_fini(svm, id);

	nvif_notify_dtor(&buffer->notify);
	nvif_object_dtor(&buffer->object);
}
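/* Allocate the hardware fault buffer object, map it, and hook up the
 * notify handler plus the software fault cache.
 */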
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nouveau_drm *drm = svm->drm;
	struct nvif_object *device = &drm->client.device.object;
	struct nvif_clb069_v0 args = {};
	int ret;

	buffer->id = id;

	ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
			       sizeof(args), &buffer->object);
	if (ret < 0) {
		SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
		return ret;
	}

	nvif_object_map(&buffer->object, NULL, 0);
	buffer->entries = args.entries;
	buffer->getaddr = args.get;
	buffer->putaddr = args.put;

	ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
			       true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
			       &buffer->notify);
	if (ret)
		return ret;

	buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault),
				 GFP_KERNEL);
	if (!buffer->fault)
		return -ENOMEM;

	return nouveau_svm_fault_buffer_init(svm, id);
}
void
nouveau_svm_resume(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;

	if (svm)
		nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;

	if (svm)
		nouveau_svm_fault_buffer_fini(svm, 0);
}

void
nouveau_svm_fini(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;

	if (svm) {
		nouveau_svm_fault_buffer_dtor(svm, 0);
		kfree(drm->svm);
		drm->svm = NULL;
	}
}
void
nouveau_svm_init(struct nouveau_drm *drm)
{
	static const struct nvif_mclass buffers[] = {
		{ VOLTA_FAULT_BUFFER_A, 0 },
		{ MAXWELL_FAULT_BUFFER_A, 0 },
		{}
	};
	struct nouveau_svm *svm;
	int ret;

	/* Disable on Volta and newer until channel recovery is fixed,
	 * otherwise clients will have a trivial way to trash the GPU
	 * for everyone.
	 */
	if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
		return;

	drm->svm->drm = drm;
	mutex_init(&drm->svm->mutex);
	INIT_LIST_HEAD(&drm->svm->inst);

	ret = nvif_mclass(&drm->client.device.object, buffers);
	if (ret < 0) {
		SVM_DBG(svm, "No supported fault buffer class");
		nouveau_svm_fini(drm);
		return;
	}

	ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
	if (ret) {
		nouveau_svm_fini(drm);
		return;
	}

	SVM_DBG(svm, "Initialised");
}