tls.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/syscalls.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/proto.h>

#include "tls.h"

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
        struct thread_struct *t = &current->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(&t->tls_array[idx]))
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}
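
/*
 * Sanity-check a descriptor supplied by userspace before it is
 * installed in the TLS area.
 */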
static bool tls_desc_okay(const struct user_desc *info)
{
        /*
         * For historical reasons (i.e. no one ever documented how any
         * of the segmentation APIs work), user programs can and do
         * assume that a struct user_desc that's all zeros except for
         * entry_number means "no segment at all". This never actually
         * worked. In fact, up to Linux 3.19, a struct user_desc like
         * this would create a 16-bit read-write segment with base and
         * limit both equal to zero.
         *
         * That was close enough to "no segment at all" until we
         * hardened this function to disallow 16-bit TLS segments. Fix
         * it up by interpreting these zeroed segments the way that they
         * were almost certainly intended to be interpreted.
         *
         * The correct way to ask for "no segment at all" is to specify
         * a user_desc that satisfies LDT_empty. To keep everything
         * working, we accept both.
         *
         * Note that there's a similar kludge in modify_ldt -- look at
         * the distinction between modes 1 and 0x11.
         */
        if (LDT_empty(info) || LDT_zero(info))
                return true;

        /*
         * espfix is required for 16-bit data segments, but espfix
         * only works for LDT segments.
         */
        if (!info->seg_32bit)
                return false;

        /* Only allow data segments in the TLS array. */
        if (info->contents > 1)
                return false;

        /*
         * Non-present segments with DPL 3 present an interesting attack
         * surface. The kernel should handle such segments correctly,
         * but TLS is very difficult to protect in a sandbox, so prevent
         * such segments from being created.
         *
         * If userspace needs to remove a TLS entry, it can still delete
         * it outright.
         */
        if (info->seg_not_present)
                return false;

        return true;
}
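
/*
 * Write n descriptors from info into p's TLS array starting at GDT
 * entry idx, and reload the live GDT entries if p is the current task.
 */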
static void set_tls_desc(struct task_struct *p, int idx,
                         const struct user_desc *info, int n)
{
        struct thread_struct *t = &p->thread;
        struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
        int cpu;

        /*
         * We must not get preempted while modifying the TLS.
         */
        cpu = get_cpu();

        while (n-- > 0) {
                if (LDT_empty(info) || LDT_zero(info))
                        memset(desc, 0, sizeof(*desc));
                else
                        fill_ldt(desc, info);
                ++info;
                ++desc;
        }

        if (t == &current->thread)
                load_TLS(t, cpu);

        put_cpu();
}

/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info,
                       int can_allocate)
{
        struct user_desc info;
        unsigned short __maybe_unused sel, modified_sel;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;

        if (!tls_desc_okay(&info))
                return -EINVAL;

        if (idx == -1)
                idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and
         * allocate an empty descriptor:
         */
        if (idx == -1 && can_allocate) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        set_tls_desc(p, idx, &info, 1);

        /*
         * If DS, ES, FS, or GS points to the modified segment, forcibly
         * refresh it. Only needed on x86_64 because x86_32 reloads them
         * on return to user mode.
         */
        modified_sel = (idx << 3) | 3;

        if (p == current) {
#ifdef CONFIG_X86_64
                savesegment(ds, sel);
                if (sel == modified_sel)
                        loadsegment(ds, sel);

                savesegment(es, sel);
                if (sel == modified_sel)
                        loadsegment(es, sel);

                savesegment(fs, sel);
                if (sel == modified_sel)
                        loadsegment(fs, sel);

                savesegment(gs, sel);
                if (sel == modified_sel)
                        load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
                savesegment(gs, sel);
                if (sel == modified_sel)
                        loadsegment(gs, sel);
#endif
        } else {
#ifdef CONFIG_X86_64
                if (p->thread.fsindex == modified_sel)
                        p->thread.fsbase = info.base_addr;

                if (p->thread.gsindex == modified_sel)
                        p->thread.gsbase = info.base_addr;
#endif
        }

        return 0;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
{
        return do_set_thread_area(current, -1, u_info, 1);
}

/*
 * Get the current Thread-Local Storage area:
 */
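
/*
 * Translate a hardware descriptor back into the user_desc layout that
 * get_thread_area() and the TLS regset expose to userspace.
 */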
static void fill_user_desc(struct user_desc *info, int idx,
                           const struct desc_struct *desc)
{
        memset(info, 0, sizeof(*info));
        info->entry_number = idx;
        info->base_addr = get_desc_base(desc);
        info->limit = get_desc_limit(desc);
        info->seg_32bit = desc->d;
        info->contents = desc->type >> 2;
        info->read_exec_only = !(desc->type & 2);
        info->limit_in_pages = desc->g;
        info->seg_not_present = !desc->p;
        info->useable = desc->avl;
#ifdef CONFIG_X86_64
        info->lm = desc->l;
#endif
}
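
/*
 * Copy the TLS descriptor at idx (or at the entry_number supplied by
 * userspace when idx is -1) back to u_info.
 */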
int do_get_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info)
{
        struct user_desc info;
        int index;

        if (idx == -1 && get_user(idx, &u_info->entry_number))
                return -EFAULT;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        index = idx - GDT_ENTRY_TLS_MIN;
        index = array_index_nospec(index,
                        GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1);

        fill_user_desc(&info, idx, &p->thread.tls_array[index]);

        if (copy_to_user(u_info, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
{
        return do_get_thread_area(current, -1, u_info);
}
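
/*
 * regset "active" hook: report how many TLS entries are in use, i.e.
 * the index just past the last non-empty descriptor.
 */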
int regset_tls_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        struct thread_struct *t = &target->thread;
        int n = GDT_ENTRY_TLS_ENTRIES;

        while (n > 0 && desc_empty(&t->tls_array[n - 1]))
                --n;
        return n;
}
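
/*
 * regset "get" hook: dump the TLS array into the destination buffer,
 * one struct user_desc per descriptor.
 */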
int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
                   struct membuf to)
{
        const struct desc_struct *tls;
        struct user_desc v;
        int pos;

        for (pos = 0, tls = target->thread.tls_array; to.left; pos++, tls++) {
                fill_user_desc(&v, GDT_ENTRY_TLS_MIN + pos, tls);
                membuf_write(&to, &v, sizeof(v));
        }
        return 0;
}
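
/*
 * regset "set" hook: validate an array of struct user_desc from a kernel
 * or user buffer and install it starting at the slot selected by pos.
 */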
int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
        const struct user_desc *info;
        int i;

        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;

        if (kbuf)
                info = kbuf;
        else if (__copy_from_user(infobuf, ubuf, count))
                return -EFAULT;
        else
                info = infobuf;

        for (i = 0; i < count / sizeof(struct user_desc); i++)
                if (!tls_desc_okay(info + i))
                        return -EINVAL;

        set_tls_desc(target,
                     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
                     info, count / sizeof(struct user_desc));

        return 0;
}