============================
Kernel-provided User Helpers
============================

These are segments of kernel-provided user code reachable from user space
at a fixed address in kernel memory.  They provide user space with some
operations that require kernel help because the native features and/or
instructions needed to implement them are missing on many ARM CPUs.  The
idea is for this code to be executed directly in user mode for best
efficiency, while being too intimate with its kernel counterpart to be
left to user libraries.  In fact this code might even differ from one CPU
to another depending on the available instruction set, or whether it is
an SMP system.  In other words, the kernel reserves the right to change
this code as needed without warning.  Only the entry points and their
results as documented here are guaranteed to be stable.

This is different from (but doesn't preclude) a full blown VDSO
implementation; however, a VDSO would prevent some assembly tricks with
constants that allow for efficient branching to those code segments.  And
since those code segments only use a few cycles before returning to user
code, the indirect far call of a VDSO would add measurable overhead to
such minimalistic operations.

User space is expected to bypass those helpers and implement those things
inline (either in the code emitted directly by the compiler, or as part of
the implementation of a library call) when optimizing for a recent enough
processor that has the necessary native support, but only if the resulting
binaries are already going to be incompatible with earlier ARM processors
due to the use of similar native instructions for other things.  In other
words, don't make binaries unable to run on earlier processors just for
the sake of not using these kernel helpers if your compiled code is not
going to use new instructions for other purposes anyway.
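
As a rough illustration of that guidance, a library might select between a
native compare-and-swap and the kernel helper (documented below) at build
time, based on the target architecture.  This is only a sketch, assuming a
GCC-compatible compiler; the __ARM_ARCH test and the cmpxchg32 wrapper are
illustrative, not part of this interface::

  typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
  #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

  /* Returns 0 on success, non-zero otherwise, like __kuser_cmpxchg. */
  static inline int cmpxchg32(volatile int *ptr, int oldval, int newval)
  {
  #if defined(__ARM_ARCH) && __ARM_ARCH >= 6
          /*
           * Native support: the compiler emits ldrex/strex here, and
           * the binary will no longer run on earlier ARM processors.
           */
          return !__sync_bool_compare_and_swap(ptr, oldval, newval);
  #else
          /* No native support: call the kernel-provided helper. */
          return __kuser_cmpxchg(oldval, newval, ptr);
  #endif
  }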

New helpers may be added over time, so an older kernel may be missing some
helpers present in a newer kernel.  For this reason, programs must check
the value of __kuser_helper_version (see below) before assuming that it is
safe to call any particular helper.  This check should ideally be
performed only once at process startup time, and execution aborted early
if the required helpers are not provided by the kernel version that
process is running on.

kuser_helper_version
--------------------

Location: 0xffff0ffc

Reference declaration::

  extern int32_t __kuser_helper_version;

Definition:

  This field contains the number of helpers being implemented by the
  running kernel.  User space may read this to determine the availability
  of a particular helper.

Usage example::

  #define __kuser_helper_version (*(int32_t *)0xffff0ffc)

  void check_kuser_version(void)
  {
          if (__kuser_helper_version < 2) {
                  fprintf(stderr, "can't do atomic operations, kernel too old\n");
                  abort();
          }
  }

Notes:

  User space may assume that the value of this field never changes
  during the lifetime of any single process.  This means that this
  field can be read once during the initialisation of a library or
  startup phase of a program.
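
A sketch of that read-once pattern, assuming a GCC-compatible compiler (the
constructor attribute usage and the MIN_KUSER_VERSION constant are
illustrative)::

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  #define __kuser_helper_version (*(int32_t *)0xffff0ffc)
  #define MIN_KUSER_VERSION 2   /* e.g. we need __kuser_cmpxchg */

  static int32_t kuser_version;

  /* Runs once, before main(), and fails fast on too-old kernels. */
  __attribute__((constructor))
  static void kuser_init(void)
  {
          kuser_version = __kuser_helper_version;
          if (kuser_version < MIN_KUSER_VERSION) {
                  fprintf(stderr, "kernel provides only %d helpers\n",
                          (int)kuser_version);
                  abort();
          }
  }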

kuser_get_tls
-------------

Location: 0xffff0fe0

Reference prototype::

  void * __kuser_get_tls(void);

Input:

  lr = return address

Output:

  r0 = TLS value

Clobbered registers:

  none

Definition:

  Get the TLS value as previously set via the __ARM_NR_set_tls syscall.

Usage example::

  typedef void * (__kuser_get_tls_t)(void);
  #define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)

  void foo()
  {
          void *tls = __kuser_get_tls();

          printf("TLS = %p\n", tls);
  }

Notes:

  - Valid only if __kuser_helper_version >= 1 (from kernel version 2.6.12).
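
To show the two halves together, here is a sketch of setting the TLS value
through the __ARM_NR_set_tls private syscall and reading it back through
the helper.  It assumes an ARM Linux target where <asm/unistd.h> defines
__ARM_NR_set_tls; note that overwriting the TLS pointer behind a threaded
libc's back would break it, so this is purely illustrative::

  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <asm/unistd.h>       /* __ARM_NR_set_tls */

  typedef void * (__kuser_get_tls_t)(void);
  #define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)

  static int tls_area;          /* stand-in for a real TLS block */

  int main(void)
  {
          syscall(__ARM_NR_set_tls, &tls_area);

          /* Should print the address of tls_area. */
          printf("TLS = %p\n", __kuser_get_tls());
          return 0;
  }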

kuser_cmpxchg
-------------

Location: 0xffff0fc0

Reference prototype::

  int __kuser_cmpxchg(int32_t oldval, int32_t newval, volatile int32_t *ptr);

Input:

  r0 = oldval
  r1 = newval
  r2 = ptr
  lr = return address

Output:

  r0 = success code (zero or non-zero)
  C flag = set if r0 == 0, clear if r0 != 0

Clobbered registers:

  r3, ip, flags

Definition:

  Atomically store newval in `*ptr` only if `*ptr` is equal to oldval.
  Return zero if `*ptr` was changed or non-zero if no exchange happened.
  The C flag is also set if `*ptr` was changed to allow for assembly
  optimization in the calling code.

Usage example::

  typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
  #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

  int atomic_add(volatile int *ptr, int val)
  {
          int old, new;

          do {
                  old = *ptr;
                  new = old + val;
          } while (__kuser_cmpxchg(old, new, ptr));

          return new;
  }

Notes:

  - This routine already includes memory barriers as needed.
  - Valid only if __kuser_helper_version >= 2 (from kernel version 2.6.12).
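
The C flag output exists so that pure assembly callers can retry on failure
without a separate test of r0.  Below is a sketch of such a loop as GCC
inline assembly, assuming ARM (not Thumb) mode; the mvn/sub constant trick
and the atomic_add_asm wrapper are illustrative, not part of this
interface::

  /* Add val to *ptr, branching on the C flag to retry. */
  static void atomic_add_asm(volatile int *ptr, int val)
  {
          __asm__ __volatile__(
          "0:     ldr     r0, [%[ptr]]\n"         /* r0 = oldval */
          "       add     r1, r0, %[val]\n"       /* r1 = newval */
          "       mov     r2, %[ptr]\n"           /* r2 = ptr */
          "       mvn     r3, #0xf000\n"          /* r3 = 0xffff0fff */
          "       mov     lr, pc\n"               /* set return address */
          "       sub     pc, r3, #0x3f\n"        /* call 0xffff0fc0 */
          "       bcc     0b\n"                   /* C clear: lost the race, retry */
          :
          : [ptr] "r" (ptr), [val] "r" (val)
          : "r0", "r1", "r2", "r3", "ip", "lr", "cc", "memory");
  }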

kuser_memory_barrier
--------------------

Location: 0xffff0fa0

Reference prototype::

  void __kuser_memory_barrier(void);

Input:

  lr = return address

Output:

  none

Clobbered registers:

  none

Definition:

  Apply any needed memory barrier to preserve consistency between data
  modified manually and data modified through __kuser_cmpxchg usage.

Usage example::

  typedef void (__kuser_dmb_t)(void);
  #define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)

Notes:

  - Valid only if __kuser_helper_version >= 3 (from kernel version 2.6.15).
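
Since the usage example above only defines the entry point, here is a
sketch of one possible use, with illustrative names: a producer orders a
payload store before a flag store observed by another thread (the
__kuser_dmb macro is the one defined above)::

  int payload;
  volatile int ready;

  void publish(int value)
  {
          payload = value;        /* write the data */
          __kuser_dmb();          /* order the data before the flag */
          ready = 1;              /* let the consumer proceed */
  }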

kuser_cmpxchg64
---------------

Location: 0xffff0f60

Reference prototype::

  int __kuser_cmpxchg64(const int64_t *oldval,
                        const int64_t *newval,
                        volatile int64_t *ptr);

Input:

  r0 = pointer to oldval
  r1 = pointer to newval
  r2 = pointer to target value
  lr = return address

Output:

  r0 = success code (zero or non-zero)
  C flag = set if r0 == 0, clear if r0 != 0

Clobbered registers:

  r3, lr, flags

Definition:

  Atomically store the 64-bit value pointed to by newval in `*ptr` only if
  `*ptr` is equal to the 64-bit value pointed to by oldval.  Return zero if
  `*ptr` was changed or non-zero if no exchange happened.

  The C flag is also set if `*ptr` was changed to allow for assembly
  optimization in the calling code.

Usage example::

  typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
                                    const int64_t *newval,
                                    volatile int64_t *ptr);
  #define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)

  int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
  {
          int64_t old, new;

          do {
                  old = *ptr;
                  new = old + val;
          } while (__kuser_cmpxchg64(&old, &new, ptr));

          return new;
  }

Notes:

  - This routine already includes memory barriers as needed.

  - Due to the length of this sequence, this spans 2 conventional kuser
    "slots", therefore 0xffff0f80 is not used as a valid entry point.

  - Valid only if __kuser_helper_version >= 5 (from kernel version 3.1).