kmem.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"
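
/*
 * Allocate @size bytes with kmalloc(). Unless KM_MAYFAIL is set, this does
 * not return NULL: it retries indefinitely, emitting a possible-deadlock
 * warning every 100 attempts and throttling between tries.
 */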
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
	int		retries = 0;
	gfp_t		lflags = kmem_flags_convert(flags);
	void		*ptr;

	trace_kmem_alloc(size, flags, _RET_IP_);

	do {
		ptr = kmalloc(size, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
				current->comm, current->pid,
				(unsigned int)size, __func__, lflags);
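		/*
		 * Throttle before retrying: wait for async block device
		 * congestion to clear, or for HZ/50 (20ms) to elapse, so
		 * reclaim and writeback can make progress.
		 */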
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}

/*
 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
 * we need to tell memory reclaim that we are in such a context via
 * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
 * and potentially deadlocking.
 */
static void *
__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
	unsigned	nofs_flag = 0;
	void		*ptr;
	gfp_t		lflags = kmem_flags_convert(flags);

	if (flags & KM_NOFS)
		nofs_flag = memalloc_nofs_save();

	ptr = __vmalloc(size, lflags);

	if (flags & KM_NOFS)
		memalloc_nofs_restore(nofs_flag);

	return ptr;
}

/*
 * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
 * to the @align_mask. We only guarantee alignment up to page size; we'll clamp
 * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
 * aligned region.
 */
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
	void		*ptr;

	trace_kmem_alloc_io(size, flags, _RET_IP_);

	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
		align_mask = PAGE_SIZE - 1;

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr) {
		if (!((uintptr_t)ptr & align_mask))
			return ptr;
		kfree(ptr);
	}
	return __kmem_vmalloc(size, flags);
}
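
/*
 * Allocate @size bytes, trying kmalloc() first (with KM_MAYFAIL forced on so
 * the attempt cannot loop forever) and falling back to __kmem_vmalloc() if a
 * physically contiguous allocation cannot be obtained.
 */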
void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
	void		*ptr;

	trace_kmem_alloc_large(size, flags, _RET_IP_);

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;
	return __kmem_vmalloc(size, flags);
}
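
/*
 * Illustrative usage sketch, not part of kmem.c: a hypothetical caller
 * allocating a buffer suitable for 512-byte-aligned device I/O. The function
 * name and @len parameter are made up for illustration; note that @align_mask
 * is a mask (alignment - 1), not an alignment value.
 */
#if 0
static void *
example_alloc_io_buffer(size_t len)
{
	/* Aligned to at least 512 bytes; falls back to vmalloc if needed. */
	return kmem_alloc_io(len, 512 - 1, KM_NOFS);
}
#endif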