posix-cpu-timers.c

/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>
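/*
 * Annotation (not in the original file): the CPUCLOCK_* helpers used
 * below come from <linux/posix-timers.h>.  A CPU clockid_t of this era
 * packs three fields: the low two bits select the clock type
 * (CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED), bit 2 is the
 * per-thread flag tested by CPUCLOCK_PERTHREAD(), and the remaining
 * high bits hold the bitwise-negated PID extracted by CPUCLOCK_PID().
 * A PID of 0 always means the calling task itself.
 */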
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
                   p->tgid != current->tgid : p->tgid != pid)) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC
                                + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}
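/*
 * Annotation (not in the original file): union cpu_time_count overlays
 * a cputime_t in .cpu with a 64-bit nanosecond count in .sched.  The
 * code maintains the invariant above that the half not covered by .cpu
 * stays zero, which is why tests like "expires.sched == 0" below are a
 * valid armed/unarmed check for every clock type, not just
 * CPUCLOCK_SCHED.
 */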
static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                tp->tv_sec = div_long_long_rem(cpu.sched,
                                               NSEC_PER_SEC, &tp->tv_nsec);
        } else {
                cputime_to_timespec(cpu.cpu, tp);
        }
}
static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}

static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}

static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}
/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}
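/*
 * Annotation (not in the original file): for example, one remaining
 * tick of budget split across eight threads would divide to zero, so
 * every per-thread expiry nudge would be empty and delivery could be
 * put off indefinitely; clamping the share to one tick guarantees
 * forward progress.
 */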
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
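/*
 * Annotation (not in the original file): the two loops below advance
 * expires past "now" by binary decomposition in O(log n) steps rather
 * than one period at a time.  Worked example for the sched case: with
 * incr = 10, expires = 100 and now = 137, delta = 137 + 10 - 100 = 47;
 * the first loop doubles incr to 40 (i = 2); the second loop adds 40
 * (it_overrun += 1 << 2, delta = 7) and skips 20 and 10, leaving
 * expires = 140, the unique reload point in (now, now + incr].
 */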
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
        return task_sched_runtime(p);
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = sched_ns(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and with
 * p->sighand->siglock held.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
                                         struct task_struct *p,
                                         union cpu_time_count *cpu)
{
        struct task_struct *t = p;
        switch (clock_idx) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
                do {
                        cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = p->signal->utime;
                do {
                        cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = p->signal->sum_sched_runtime;
                /* Add in each other live thread. */
                while ((t = next_thread(t)) != p) {
                        cpu->sched += t->se.sum_exec_runtime;
                }
                cpu->sched += sched_ns(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        int ret;
        unsigned long flags;
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
                                            cpu);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        return ret;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_pid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (p->tgid == current->tgid) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (p->tgid == pid && p->signal) {
                                        error =
                                            cpu_clock_sample_group(which_clock,
                                                                   p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}
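/*
 * Annotation (not in the original file): this is the kernel side of
 * clock_gettime() on a CPU clock.  A minimal user-space sketch that
 * exercises it (link with -lrt on older glibc):
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0)
 *			printf("CPU time used: %ld.%09ld s\n",
 *			       (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */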
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != current->tgid)
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != pid)
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
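/*
 * Annotation (not in the original file): "head" points into the
 * caller's cpu_timers[] array, and the two ++head statements below step
 * it from the CPUCLOCK_PROF list to the CPUCLOCK_VIRT list and then to
 * the CPUCLOCK_SCHED list, matching the clock-type indices.
 */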
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime_add(tsk->utime, tsk->signal->utime),
                       cputime_add(tsk->stime, tsk->signal->stime),
                       tsk->se.sum_exec_runtime +
                                tsk->signal->sum_sched_runtime);
}
/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
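/*
 * Annotation (not in the original file): each thread's deadline is the
 * remaining group budget divided evenly.  E.g. with a process timer 100
 * ticks away and 4 live threads, each thread is told to check in after
 * ~25 more ticks of its own; since the n shares sum to the whole
 * budget, some thread is guaranteed to notice no later than the point
 * where the group total reaches the expiry.
 */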
static void process_timer_rebalance(struct task_struct *p,
                                    unsigned int clock_idx,
                                    union cpu_time_count expires,
                                    union cpu_time_count val)
{
        cputime_t ticks, left;
        unsigned long long ns, nsleft;
        struct task_struct *t = p;
        unsigned int nthreads = atomic_read(&p->signal->live);

        if (!nthreads)
                return;

        switch (clock_idx) {
        default:
                BUG();
                break;
        case CPUCLOCK_PROF:
                left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
                                            nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(prof_ticks(t), left);
                                if (cputime_eq(t->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_prof_expires, ticks)) {
                                        t->it_prof_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
                                            nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(virt_ticks(t), left);
                                if (cputime_eq(t->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_virt_expires, ticks)) {
                                        t->it_virt_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
                nsleft = max_t(unsigned long long, nsleft, 1);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ns = t->se.sum_exec_runtime + nsleft;
                                if (t->it_sched_expires == 0 ||
                                    t->it_sched_expires > ns) {
                                        t->it_sched_expires = ns;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        }
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */
                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_prof_expires,
                                               nt->expires.cpu))
                                        p->it_prof_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_virt_expires,
                                               nt->expires.cpu))
                                        p->it_virt_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->it_sched_expires == 0 ||
                                    p->it_sched_expires > nt->expires.sched)
                                        p->it_sched_expires = nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, we must balance
                         * all the live threads' expirations.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_SCHED:
                        rebalance:
                                process_timer_rebalance(
                                        timer->it.cpu.task,
                                        CPUCLOCK_WHICH(timer->it_clock),
                                        timer->it.cpu.expires, now);
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the old
         * value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
 dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
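/*
 * Annotation (not in the original file): maxfire caps the number of
 * timers moved onto the firing list in one pass (20 per clock type
 * here), bounding the work done in the timer-interrupt path even when
 * many timers share the same expiry; any leftovers are simply caught on
 * a later tick.
 */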
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;

        maxfire = 20;
        tsk->it_prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->it_prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->it_virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->it_virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->it_sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->it_sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }
}
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] list onto the firing list.  The
 * per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct task_struct *t;
        struct list_head *timers = sig->cpu_timers;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it_prof_expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED]))
                return;

        /*
         * Collect the current process totals.
         */
        utime = sig->utime;
        stime = sig->stime;
        sum_sched_runtime = sig->sum_sched_runtime;
        t = tsk;
        do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
                sum_sched_runtime += t->se.sum_exec_runtime;
                t = next_thread(t);
        } while (t != tsk);
        ptime = cputime_add(utime, stime);

        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
                        prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
                        virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || sum_sched_runtime < t->expires.sched) {
                        sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                if (cputime_ge(ptime, sig->it_prof_expires)) {
                        /* ITIMER_PROF fires and reloads. */
                        sig->it_prof_expires = sig->it_prof_incr;
                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                                sig->it_prof_expires = cputime_add(
                                        sig->it_prof_expires, ptime);
                        }
                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
                    (cputime_eq(prof_expires, cputime_zero) ||
                     cputime_lt(sig->it_prof_expires, prof_expires))) {
                        prof_expires = sig->it_prof_expires;
                }
        }
        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                if (cputime_ge(utime, sig->it_virt_expires)) {
                        /* ITIMER_VIRTUAL fires and reloads. */
                        sig->it_virt_expires = sig->it_virt_incr;
                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                                sig->it_virt_expires = cputime_add(
                                        sig->it_virt_expires, utime);
                        }
                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
                    (cputime_eq(virt_expires, cputime_zero) ||
                     cputime_lt(sig->it_virt_expires, virt_expires))) {
                        virt_expires = sig->it_virt_expires;
                }
        }
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) ||
            !cputime_eq(virt_expires, cputime_zero) ||
            sched_expires != 0) {
                /*
                 * Rebalance the threads' expiry times for the remaining
                 * process CPU timers.
                 */
                cputime_t prof_left, virt_left, ticks;
                unsigned long long sched_left, sched;
                const unsigned int nthreads = atomic_read(&sig->live);

                if (!nthreads)
                        return;

                prof_left = cputime_sub(prof_expires, utime);
                prof_left = cputime_sub(prof_left, stime);
                prof_left = cputime_div_non_zero(prof_left, nthreads);
                virt_left = cputime_sub(virt_expires, utime);
                virt_left = cputime_div_non_zero(virt_left, nthreads);
                if (sched_expires) {
                        sched_left = sched_expires - sum_sched_runtime;
                        do_div(sched_left, nthreads);
                        sched_left = max_t(unsigned long long, sched_left, 1);
                } else {
                        sched_left = 0;
                }
                t = tsk;
                do {
                        if (unlikely(t->flags & PF_EXITING))
                                continue;

                        ticks = cputime_add(cputime_add(t->utime, t->stime),
                                            prof_left);
                        if (!cputime_eq(prof_expires, cputime_zero) &&
                            (cputime_eq(t->it_prof_expires, cputime_zero) ||
                             cputime_gt(t->it_prof_expires, ticks))) {
                                t->it_prof_expires = ticks;
                        }

                        ticks = cputime_add(t->utime, virt_left);
                        if (!cputime_eq(virt_expires, cputime_zero) &&
                            (cputime_eq(t->it_virt_expires, cputime_zero) ||
                             cputime_gt(t->it_virt_expires, ticks))) {
                                t->it_virt_expires = ticks;
                        }

                        sched = t->se.sum_exec_runtime + sched_left;
                        if (sched_expires && (t->it_sched_expires == 0 ||
                                              t->it_sched_expires > sched)) {
                                t->it_sched_expires = sched;
                        }
                } while ((t = next_thread(t)) != tsk);
        }
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it. */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below. */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
        (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
         cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

        if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
            (tsk->it_sched_expires == 0 ||
             tsk->se.sum_exec_runtime < tsk->it_sched_expires))
                return;

#undef UNEXPIRED
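        /*
         * Annotation (not in the original file): the UNEXPIRED() test
         * above is the cheap fast path taken on every tick without any
         * locks; it reads only this task's own cached expiry fields, so
         * the locked scan below runs only when some per-thread expiry
         * has actually been crossed.
         */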
        /*
         * Double-check with locks held.
         */
        read_lock(&tasklist_lock);
        if (likely(tsk->signal != NULL)) {
                spin_lock(&tsk->sighand->siglock);

                /*
                 * Take all the firing timers off tsk->cpu_timers[N] and
                 * tsk->signal->cpu_timers[N] and put them on the firing list.
                 */
                check_thread_timers(tsk, &firing);
                check_process_timers(tsk, &firing);

                /*
                 * We must release these locks before taking any timer's lock.
                 * There is a potential race with timer deletion here, as the
                 * siglock now protects our private firing list.  We have set
                 * the firing flag in each timer, so that a deletion attempt
                 * that gets the timer lock before we do will give it up and
                 * spin until we've taken care of that timer below.
                 */
                spin_unlock(&tsk->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}
/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_clock_sample_group_locked(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_entry(head->next,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                /*
                 * Rejigger each thread's expiry time so that one will
                 * notice before we hit the process-cumulative expiry time.
                 */
                union cpu_time_count expires = { .sched = 0 };
                expires.cpu = *newval;
                process_timer_rebalance(tsk, clock_idx, expires, now);
        }
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL &&
                    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
        }
        return error;
}
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
        struct itimerspec it;
        int error;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;
        restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL &&
                    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = t.tv_sec;
                restart_block->arg3 = t.tv_nsec;
        }
        return error;
}
#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
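/*
 * Annotation (not in the original file): with a PID of 0, these two
 * clockids denote the calling process's and calling thread's own
 * CPUCLOCK_SCHED clock; they back the standard
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID clocks
 * registered in init_posix_cpu_timers() below.
 */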
static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp,
                             struct timespec __user *rmtp)
{
        return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);
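/*
 * Annotation (not in the original file): a user-space sketch of the
 * timer path registered above.  timer_create() on
 * CLOCK_PROCESS_CPUTIME_ID ends up in process_cpu_timer_create(), and
 * expiry is driven out of the tick by run_posix_cpu_timers().  Link
 * with -lrt on older glibc:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static void on_alarm(int sig)
 *	{
 *		write(1, "1s of CPU used\n", 15);
 *		_exit(0);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *					.sigev_signo  = SIGALRM };
 *		struct itimerspec its = { .it_value = { .tv_sec = 1 } };
 *		timer_t tid;
 *
 *		signal(SIGALRM, on_alarm);
 *		timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *		timer_settime(tid, 0, &its, NULL);
 *		for (;;)
 *			;	// burn CPU; the handler fires after ~1s
 *	}
 */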