0004-third-party-lss.patch 9.1 KB

diff --git a/linux_syscall_support.h b/linux_syscall_support.h
index 8d4e4d2..ce1345a 100644
--- a/linux_syscall_support.h
+++ b/linux_syscall_support.h
@@ -88,7 +88,7 @@
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \
defined(__mips__) || defined(__PPC__) || defined(__ARM_EABI__) || \
- defined(__aarch64__) || defined(__s390__)) || defined(__e2k__) \
+ defined(__aarch64__) || defined(__s390__) || defined(__riscv)) || defined(__e2k__) \
&& (defined(__linux) || defined(__ANDROID__))
#ifndef SYS_CPLUSPLUS
@@ -302,7 +302,7 @@ struct kernel_old_sigaction {
} __attribute__((packed,aligned(4)));
#elif (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
#define kernel_old_sigaction kernel_sigaction
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__riscv)
// No kernel_old_sigaction defined for arm64.
#endif
@@ -542,7 +542,7 @@ struct kernel_stat {
int st_blocks;
int st_pad4[14];
};
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__riscv)
struct kernel_stat {
unsigned long st_dev;
unsigned long st_ino;
@@ -1110,7 +1110,7 @@ struct kernel_statfs {
#define __NR_getrandom (__NR_SYSCALL_BASE + 384)
#endif
/* End of ARM 3/EABI definitions */
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__riscv)
#ifndef __NR_setxattr
#define __NR_setxattr 5
#endif
@@ -1925,7 +1925,7 @@ struct kernel_statfs {
#undef LSS_RETURN
#if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) \
- || defined(__ARM_EABI__) || defined(__aarch64__) || defined(__s390__)) \
+ || defined(__ARM_EABI__) || defined(__aarch64__) || defined(__s390__) || defined(__riscv)) \
|| defined(__e2k__)
/* Failing system calls return a negative result in the range of
* -1..-4095. These are "errno" values with the sign inverted.
@@ -3419,6 +3419,122 @@ struct kernel_statfs {
}
LSS_RETURN(int, __ret);
}
+ #elif defined(__riscv)
+ #undef LSS_REG
+ #define LSS_REG(r,a) register int64_t __r##r __asm__("a"#r) = (int64_t)a
+ #undef LSS_BODY
+ #define LSS_BODY(type,name,args...) \
+ register int64_t __res_a0 __asm__("a0"); \
+ register int64_t __a7 __asm__("a7") = __NR_##name; \
+ int64_t __res; \
+ __asm__ __volatile__ ("scall\n" \
+ : "=r"(__res_a0) \
+ : "r"(__a7) , ## args \
+ : "memory"); \
+ __res = __res_a0; \
+ LSS_RETURN(type, __res)
+ #undef _syscall0
+ #define _syscall0(type, name) \
+ type LSS_NAME(name)(void) { \
+ LSS_BODY(type, name); \
+ }
+ #undef _syscall1
+ #define _syscall1(type, name, type1, arg1) \
+ type LSS_NAME(name)(type1 arg1) { \
+ LSS_REG(0, arg1); LSS_BODY(type, name, "r"(__r0)); \
+ }
+ #undef _syscall2
+ #define _syscall2(type, name, type1, arg1, type2, arg2) \
+ type LSS_NAME(name)(type1 arg1, type2 arg2) { \
+ LSS_REG(0, arg1); LSS_REG(1, arg2); \
+ LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \
+ }
+ #undef _syscall3
+ #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
+ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
+ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
+ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \
+ }
+ #undef _syscall4
+ #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
+ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
+ LSS_REG(3, arg4); \
+ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \
+ }
+ #undef _syscall5
+ #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+ type5,arg5) \
+ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
+ type5 arg5) { \
+ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
+ LSS_REG(3, arg4); LSS_REG(4, arg5); \
+ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
+ "r"(__r4)); \
+ }
+ #undef _syscall6
+ #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+ type5,arg5,type6,arg6) \
+ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
+ type5 arg5, type6 arg6) { \
+ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
+ LSS_REG(3, arg4); LSS_REG(4, arg5); LSS_REG(5, arg6); \
+ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
+ "r"(__r4), "r"(__r5)); \
+ }
+
+ LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
+ int flags, void *arg, int *parent_tidptr,
+ void *newtls, int *child_tidptr) {
+ int64_t __res;
+ {
+ register int64_t __res_a0 __asm__("a0");
+ register uint64_t __flags __asm__("a0") = flags;
+ register void *__stack __asm__("a1") = child_stack;
+ register void *__ptid __asm__("a2") = parent_tidptr;
+ register void *__tls __asm__("a3") = newtls;
+ register int *__ctid __asm__("a4") = child_tidptr;
+ __asm__ __volatile__(/* Push "arg" and "fn" onto the stack that will be
+ * used by the child.
+ */
+ "addi %2,%2,-16\n"
+ "sd %1, 0(%2)\n"
+ "sd %4, 8(%2)\n"
+
+ /* %a0 = syscall(%a0 = flags,
+ * %a1 = child_stack,
+ * %a2 = parent_tidptr,
+ * %a3 = newtls,
+ * %a4 = child_tidptr)
+ */
+ "li a7, %8\n"
+ "scall\n"
+
+ /* if (%a0 != 0)
+ * return %a0;
+ */
+ "bnez %0, 1f\n"
+
+ /* In the child, now. Call "fn(arg)".
+ */
+ "ld a1, 0(sp)\n"
+ "ld a0, 8(sp)\n"
+ "jalr a1\n"
+
+ /* Call _exit(%a0).
+ */
+ "li a7, %9\n"
+ "scall\n"
+ "1:\n"
+ : "=r" (__res_a0)
+ : "r"(fn), "r"(__stack), "r"(__flags), "r"(arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid),
+ "i"(__NR_clone), "i"(__NR_exit)
+ : "cc", "memory");
+ __res = __res_a0;
+ }
+ LSS_RETURN(int, __res);
+ }
#elif defined(__e2k__)
#undef _LSS_BODY
@@ -4484,7 +4600,7 @@ struct kernel_statfs {
LSS_SC_BODY(4, int, 8, d, type, protocol, sv);
}
#endif
- #if defined(__ARM_EABI__) || defined (__aarch64__)
+ #if defined(__ARM_EABI__) || defined (__aarch64__) || defined(__riscv)
LSS_INLINE _syscall3(ssize_t, recvmsg, int, s, struct kernel_msghdr*, msg,
int, flags)
LSS_INLINE _syscall3(ssize_t, sendmsg, int, s, const struct kernel_msghdr*,
@@ -4812,7 +4928,7 @@ struct kernel_statfs {
// TODO: define this in an arch-independant way instead of inlining the clone
// syscall body.
-# if defined(__aarch64__)
+# if defined(__aarch64__) || defined(__riscv)
LSS_INLINE pid_t LSS_NAME(fork)(void) {
// No fork syscall on aarch64 - implement by means of the clone syscall.
// Note that this does not reset glibc's cached view of the PID/TID, so
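A minimal usage sketch (illustration only, not part of the patch): once the RISC-V branches above are in place, the sys_*-prefixed wrappers that linux_syscall_support.h already generates on aarch64 become available on riscv64 as well, each expanding to an "scall" with the syscall number in a7 and the arguments in a0..a5. The include path below is an assumption and depends on how the project vendors the header.

#include "third_party/lss/linux_syscall_support.h"  /* path is project-specific */

int main(void) {
  /* sys_getpid() and sys_write() are the default LSS_NAME()-prefixed
   * wrappers produced by the _syscall0/_syscall3 macros redefined above;
   * on riscv64 they now trap into the kernel via "scall". */
  static const char msg[] = "hello from lss on riscv64\n";
  pid_t pid = sys_getpid();
  sys_write(1, msg, sizeof(msg) - 1);
  return pid > 0 ? 0 : 1;
}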