tls.c

/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "asm/uaccess.h"
#include "asm/ptrace.h"
#include "asm/segment.h"
#include "asm/smp.h"
#include "asm/desc.h"
#include "choose-mode.h"
#include "kern.h"
#include "kern_util.h"
#include "mode_kern.h"
#include "os.h"
#include "mode.h"

#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/* If needed we can detect when it's uninitialized. */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min = -1;

#ifdef CONFIG_MODE_SKAS
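/* These hand the descriptor to (or read it back from) the host process that
 * runs this UML's userspace - userspace_pid[] holds that process's pid, one
 * slot per CPU. Roughly, a guest TLS descriptor only takes effect once it
 * has been propagated to the host this way. */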
int do_set_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}

int do_get_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}
#endif

/*
 * get_free_idx: find a yet unused TLS descriptor index, for use by
 * sys_set_thread_area() when userspace passes entry_number == -1.
 * XXX: Consider leaving one free slot for glibc usage at first place. This must
 * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct* task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc* info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/* Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
	 * indeed an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

#define O_FORCE 1
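
/* As far as the code below goes: load_TLS() pushes this task's shadow
 * tls_array out to the host. Each entry tracks two flags: 'present' (the
 * guest has installed a descriptor in that slot) and 'flushed' (the host
 * currently matches our copy). With O_FORCE every entry is rewritten on the
 * host even if already flushed - presumably what is wanted right after
 * switching to another host context - while without it only unflushed
 * entries are sent. */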
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct* curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/* Actually, now if it wasn't flushed it gets cleared and
		 * flushed to the host, which will clear it. */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}

out:
	return ret;
}

/* Check whether we need to flush TLS state to the host for the incoming
 * process, i.e. whether any descriptor (present or not) has not yet been
 * flushed.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* Can't test curr->present, we may need to clear a descriptor
		 * which had a value. */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}

	return ret;
}

/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* Still correct to do this, if it wasn't present on the host it
		 * will remain as flushed as it was. */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
 * common host process, so this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this wouldn't be needed.
 *
 * It also won't be needed when (and if) we add support for the host SKAS
 * patch. */
int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/* We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, that would also result in us calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error. */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	if (needs_TLS_update(to))
		return load_TLS(0, to);

	return 0;
}

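/* Record a descriptor in the task's shadow tls_array. Note that this only
 * updates the in-kernel copy; the host gets updated separately - immediately
 * by callers passing flushed == 1, or lazily by a later load_TLS() for
 * flushed == 0. */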
static int set_tls_entry(struct task_struct* task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

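/* Presumably invoked at clone() time when CLONE_SETTLS is set: the new
 * descriptor pointer is clone()'s tls argument, which the i386 ABI passes in
 * %esi (hence UPT_ESI on the child's registers). The entry is recorded as not
 * flushed, so the first switch to the child pushes it to the host. */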
int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_ESI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct* task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/* Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.", current->pid);
	}

	return 0;
clear:
	/* When the TLS entry has not been set, the values the user reads back
	 * from the tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

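/* sys_set_thread_area(): copy the descriptor in from userspace, allocate a
 * free GDT slot if entry_number is -1 (and report the chosen slot back),
 * push it to the host via the mode-specific helper, then record it in the
 * shadow array as flushed.
 *
 * A rough userspace usage sketch (not from this file; 'base' is whatever the
 * caller wants as the segment base):
 *
 *	struct user_desc desc = { .entry_number = -1, .base_addr = base,
 *				  .limit = 0xfffff, .seg_32bit = 1,
 *				  .limit_in_pages = 1, .useable = 1 };
 *	syscall(__NR_set_thread_area, &desc);
 *	// desc.entry_number now holds the slot the kernel picked
 */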
asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas,
			       &info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs from
 * i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

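/* Boot-time probe: check_host_supports_tls() (presumably implemented in the
 * os-Linux layer) detects whether set_thread_area works on the host at all,
 * and at which GDT index the host's TLS slots start; that index differs
 * between 32-bit and 64-bit hosts, which is what the switch below reports. */
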
/* XXX: This part is probably common to i386 and x86-64. Don't create a common
 * file for now, do that when implementing x86-64 support. */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk("i386\n");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk("x86_64\n");
			break;
		}
	} else
		printk(KERN_ERR " Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");
	return 0;
}

__initcall(__setup_host_supports_tls);