gettimeofday.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <vdso/datapage.h>
#include <vdso/helpers.h>

#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
        return ((cycles - last) & mask) * mult;
}
#endif
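
/*
 * The masked delta computed above is turned into nanoseconds by the
 * callers as vdso_shift_ns(base_ns + vdso_calc_delta(...), shift),
 * i.e. ns = (delta * mult) >> shift. As an illustration (the numbers
 * are made up, not taken from a real clocksource): for a 1 GHz
 * counter, mult = 1 << 22 and shift = 22 turn one cycle into exactly
 * one nanosecond, since (1 * 2^22) >> 22 == 1.
 */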
#ifndef vdso_shift_ns
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
{
        return ns >> shift;
}
#endif

#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
        return true;
}
#endif

#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
        return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif

#ifndef vdso_cycles_ok
static inline bool vdso_cycles_ok(u64 cycles)
{
        return true;
}
#endif

#ifdef CONFIG_TIME_NS
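/*
 * Tasks inside a time namespace see CLOCK_MONOTONIC and CLOCK_BOOTTIME
 * shifted by a per-namespace offset (other clocks have a zero offset).
 * do_hres_timens() therefore reads the host clocksource data via
 * __arch_get_timens_vdso_data() and then adds the offset stored in the
 * task's VVAR page (vdns->offset[clk]).
 */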
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
                          struct __kernel_timespec *ts)
{
        const struct vdso_data *vd = __arch_get_timens_vdso_data();
        const struct timens_offset *offs = &vdns->offset[clk];
        const struct vdso_timestamp *vdso_ts;
        u64 cycles, last, ns;
        u32 seq;
        s64 sec;

        if (clk != CLOCK_MONOTONIC_RAW)
                vd = &vd[CS_HRES_COARSE];
        else
                vd = &vd[CS_RAW];
        vdso_ts = &vd->basetime[clk];

        do {
                seq = vdso_read_begin(vd);

                if (unlikely(!vdso_clocksource_ok(vd)))
                        return -1;

                cycles = __arch_get_hw_counter(vd->clock_mode, vd);
                if (unlikely(!vdso_cycles_ok(cycles)))
                        return -1;
                ns = vdso_ts->nsec;
                last = vd->cycle_last;
                ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
                ns = vdso_shift_ns(ns, vd->shift);
                sec = vdso_ts->sec;
        } while (unlikely(vdso_read_retry(vd, seq)));

        /* Add the namespace offset */
        sec += offs->sec;
        ns += offs->nsec;

        /*
         * Do this outside the loop: a race inside the loop could result
         * in __iter_div_u64_rem() being extremely slow.
         */
        ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}
#else
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
        return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
                          struct __kernel_timespec *ts)
{
        return -EINVAL;
}
#endif
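
/*
 * The vdso_read_begin()/vdso_read_retry() pair implements the reader
 * side of a seqcount. Roughly (a sketch of the helpers, not their
 * exact implementation):
 *
 *      do {
 *              while ((seq = READ_ONCE(vd->seq)) & 1)
 *                      cpu_relax();            // writer in progress
 *              smp_rmb();
 *              ... copy out the timestamp data ...
 *              smp_rmb();
 *      } while (READ_ONCE(vd->seq) != seq);
 *
 * do_hres() below open codes vdso_read_begin() so it can detect the
 * time namespace marker; see the comment inside the loop.
 */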
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
                                   struct __kernel_timespec *ts)
{
        const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
        u64 cycles, last, sec, ns;
        u32 seq;

        /* Allows the high resolution parts to be compiled out */
        if (!__arch_vdso_hres_capable())
                return -1;

        do {
                /*
                 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
                 * enabled tasks have a special VVAR page installed which
                 * has vd->seq set to 1 and vd->clock_mode set to
                 * VDSO_CLOCKMODE_TIMENS. For tasks which are not affected by
                 * a time namespace this does not hurt performance: if vd->seq
                 * is odd, i.e. a concurrent update is in progress, the extra
                 * check for vd->clock_mode is just a few extra instructions
                 * while spin waiting for vd->seq to become even again.
                 */
                while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
                        if (IS_ENABLED(CONFIG_TIME_NS) &&
                            vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
                                return do_hres_timens(vd, clk, ts);
                        cpu_relax();
                }
                smp_rmb();

                if (unlikely(!vdso_clocksource_ok(vd)))
                        return -1;

                cycles = __arch_get_hw_counter(vd->clock_mode, vd);
                if (unlikely(!vdso_cycles_ok(cycles)))
                        return -1;
                ns = vdso_ts->nsec;
                last = vd->cycle_last;
                ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
                ns = vdso_shift_ns(ns, vd->shift);
                sec = vdso_ts->sec;
        } while (unlikely(vdso_read_retry(vd, seq)));

        /*
         * Do this outside the loop: a race inside the loop could result
         * in __iter_div_u64_rem() being extremely slow.
         */
        ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
                            struct __kernel_timespec *ts)
{
        const struct vdso_data *vd = __arch_get_timens_vdso_data();
        const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
        const struct timens_offset *offs = &vdns->offset[clk];
        u64 nsec;
        s64 sec;
        s32 seq;

        do {
                seq = vdso_read_begin(vd);
                sec = vdso_ts->sec;
                nsec = vdso_ts->nsec;
        } while (unlikely(vdso_read_retry(vd, seq)));

        /* Add the namespace offset */
        sec += offs->sec;
        nsec += offs->nsec;

        /*
         * Do this outside the loop: a race inside the loop could result
         * in __iter_div_u64_rem() being extremely slow.
         */
        ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
        ts->tv_nsec = nsec;
        return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
                            struct __kernel_timespec *ts)
{
        return -1;
}
#endif
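
/*
 * Coarse clocks return the timestamp cached at the last tick. No
 * hardware counter is read, so neither the clocksource check nor the
 * mult/shift conversion is needed here.
 */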
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
                                     struct __kernel_timespec *ts)
{
        const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
        u32 seq;

        do {
                /*
                 * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
                 * do_hres().
                 */
                while ((seq = READ_ONCE(vd->seq)) & 1) {
                        if (IS_ENABLED(CONFIG_TIME_NS) &&
                            vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
                                return do_coarse_timens(vd, clk, ts);
                        cpu_relax();
                }
                smp_rmb();

                ts->tv_sec = vdso_ts->sec;
                ts->tv_nsec = vdso_ts->nsec;
        } while (unlikely(vdso_read_retry(vd, seq)));

        return 0;
}

static __always_inline int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
                             struct __kernel_timespec *ts)
{
        u32 msk;

        /* Check for negative values or invalid clocks */
        if (unlikely((u32) clock >= MAX_CLOCKS))
                return -1;

        /*
         * Convert the clockid to a bitmask and use it to check which
         * clocks are handled in the VDSO directly.
         */
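        /*
         * As defined in vdso/datapage.h: VDSO_HRES covers CLOCK_REALTIME,
         * CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI, VDSO_COARSE the
         * two *_COARSE ids, and VDSO_RAW covers CLOCK_MONOTONIC_RAW.
         */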
        msk = 1U << clock;
        if (likely(msk & VDSO_HRES))
                vd = &vd[CS_HRES_COARSE];
        else if (msk & VDSO_COARSE)
                return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
        else if (msk & VDSO_RAW)
                vd = &vd[CS_RAW];
        else
                return -1;

        return do_hres(vd, clock, ts);
}

static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
                           struct __kernel_timespec *ts)
{
        int ret = __cvdso_clock_gettime_common(vd, clock, ts);

        if (unlikely(ret))
                return clock_gettime_fallback(clock, ts);
        return 0;
}

static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
        return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
}
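
/*
 * Architectures expose the helpers above through thin entry points in
 * their vDSO. A minimal sketch of such a wrapper (naming varies per
 * architecture, e.g. __vdso_* on x86 and __kernel_* on arm64):
 *
 *      int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 *      {
 *              return __cvdso_clock_gettime(clock, ts);
 *      }
 */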

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
                             struct old_timespec32 *res)
{
        struct __kernel_timespec ts;
        int ret;

        ret = __cvdso_clock_gettime_common(vd, clock, &ts);

        if (unlikely(ret))
                return clock_gettime32_fallback(clock, res);

        /* For ret == 0 */
        res->tv_sec = ts.tv_sec;
        res->tv_nsec = ts.tv_nsec;

        return ret;
}

static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
        return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
}
#endif /* BUILD_VDSO32 */

static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
                          struct __kernel_old_timeval *tv, struct timezone *tz)
{
        if (likely(tv != NULL)) {
                struct __kernel_timespec ts;

                if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
                        return gettimeofday_fallback(tv, tz);

                tv->tv_sec = ts.tv_sec;
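                /*
                 * ts.tv_nsec is guaranteed to be smaller than NSEC_PER_SEC,
                 * so the cast to u32 is lossless and keeps the division
                 * cheap on 32bit.
                 */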
                tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
        }

        if (unlikely(tz != NULL)) {
                if (IS_ENABLED(CONFIG_TIME_NS) &&
                    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
                        vd = __arch_get_timens_vdso_data();

                tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
                tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
        }

        return 0;
}

static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
}

#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
        __kernel_old_time_t t;

        if (IS_ENABLED(CONFIG_TIME_NS) &&
            vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
                vd = __arch_get_timens_vdso_data();
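
        /*
         * time() has a granularity of one second, so a single racy read
         * of the seconds value is sufficient; no seqcount protection is
         * required here.
         */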
        t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

        if (time)
                *time = t;

        return t;
}

static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
        return __cvdso_time_data(__arch_get_vdso_data(), time);
}
#endif /* VDSO_HAS_TIME */

#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
                                struct __kernel_timespec *res)
{
        u32 msk;
        u64 ns;

        /* Check for negative values or invalid clocks */
        if (unlikely((u32) clock >= MAX_CLOCKS))
                return -1;

        if (IS_ENABLED(CONFIG_TIME_NS) &&
            vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
                vd = __arch_get_timens_vdso_data();

        /*
         * Convert the clockid to a bitmask and use it to check which
         * clocks are handled in the VDSO directly.
         */
        msk = 1U << clock;
        if (msk & (VDSO_HRES | VDSO_RAW)) {
                /*
                 * Preserves the behaviour of posix_get_hrtimer_res().
                 */
                ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
        } else if (msk & VDSO_COARSE) {
                /*
                 * Preserves the behaviour of posix_get_coarse_res().
                 */
                ns = LOW_RES_NSEC;
        } else {
                return -1;
        }

        if (likely(res)) {
                res->tv_sec = 0;
                res->tv_nsec = ns;
        }
        return 0;
}

static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
                              struct __kernel_timespec *res)
{
        int ret = __cvdso_clock_getres_common(vd, clock, res);

        if (unlikely(ret))
                return clock_getres_fallback(clock, res);
        return 0;
}

static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
        return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
}

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
                                 struct old_timespec32 *res)
{
        struct __kernel_timespec ts;
        int ret;

        ret = __cvdso_clock_getres_common(vd, clock, &ts);

        if (unlikely(ret))
                return clock_getres32_fallback(clock, res);

        if (likely(res)) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }
        return ret;
}

static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
        return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
                                                clock, res);
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */
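
/*
 * For reference, userspace reaches this code through the C library,
 * e.g. (a plain userspace sketch, not part of this file):
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_MONOTONIC, &ts);
 *
 * glibc resolves clock_gettime() to the vDSO symbol when one is
 * exported and only falls back to the real syscall otherwise.
 */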