psi.c

  1. /*
  2. * Pressure stall information for CPU, memory and IO
  3. *
  4. * Copyright (c) 2018 Facebook, Inc.
  5. * Author: Johannes Weiner <hannes@cmpxchg.org>
  6. *
  7. * Polling support by Suren Baghdasaryan <surenb@google.com>
  8. * Copyright (c) 2018 Google, Inc.
  9. *
  10. * When CPU, memory and IO are contended, tasks experience delays that
  11. * reduce throughput and introduce latencies into the workload. Memory
  12. * and IO contention, in addition, can cause a full loss of forward
  13. * progress in which the CPU goes idle.
  14. *
  15. * This code aggregates individual task delays into resource pressure
  16. * metrics that indicate problems with both workload health and
  17. * resource utilization.
  18. *
  19. * Model
  20. *
  21. * The time in which a task can execute on a CPU is our baseline for
  22. * productivity. Pressure expresses the amount of time in which this
  23. * potential cannot be realized due to resource contention.
  24. *
  25. * This concept of productivity has two components: the workload and
  26. * the CPU. To measure the impact of pressure on both, we define two
  27. * contention states for a resource: SOME and FULL.
  28. *
  29. * In the SOME state of a given resource, one or more tasks are
  30. * delayed on that resource. This affects the workload's ability to
  31. * perform work, but the CPU may still be executing other tasks.
  32. *
  33. * In the FULL state of a given resource, all non-idle tasks are
  34. * delayed on that resource such that nobody is advancing and the CPU
  35. * goes idle. This leaves both workload and CPU unproductive.
  36. *
  37. * (Naturally, the FULL state doesn't exist for the CPU resource.)
  38. *
  39. * SOME = nr_delayed_tasks != 0
  40. * FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
  41. *
  42. * The percentage of wallclock time spent in those compound stall
  43. * states gives pressure numbers between 0 and 100 for each resource,
  44. * where the SOME percentage indicates workload slowdowns and the FULL
  45. * percentage indicates reduced CPU utilization:
  46. *
  47. * %SOME = time(SOME) / period
  48. * %FULL = time(FULL) / period
  49. *
  50. * Multiple CPUs
  51. *
  52. * The more tasks and available CPUs there are, the more work can be
  53. * performed concurrently. This means that the potential that can go
  54. * unrealized due to resource contention *also* scales with non-idle
  55. * tasks and CPUs.
  56. *
  57. * Consider a scenario where 257 number crunching tasks are trying to
  58. * run concurrently on 256 CPUs. If we simply aggregated the task
  59. * states, we would have to conclude a CPU SOME pressure number of
  60. * 100%, since *somebody* is waiting on a runqueue at all
  61. * times. However, that is clearly not the amount of contention the
  62. * workload is experiencing: only one out of 256 possible execution
  63. * threads will be contended at any given time, or about 0.4%.
  64. *
  65. * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
  66. * given time *one* of the tasks is delayed due to a lack of memory.
  67. * Again, looking purely at the task state would yield a memory FULL
  68. * pressure number of 0%, since *somebody* is always making forward
  69. * progress. But again this wouldn't capture the amount of execution
  70. * potential lost, which is 1 out of 4 CPUs, or 25%.
  71. *
  72. * To calculate wasted potential (pressure) with multiple processors,
  73. * we have to base our calculation on the number of non-idle tasks in
  74. * conjunction with the number of available CPUs, which is the number
  75. * of potential execution threads. SOME then becomes the proportion of
  76. * delayed tasks to possible threads, and FULL is the share of possible
  77. * threads that are unproductive due to delays:
  78. *
  79. * threads = min(nr_nonidle_tasks, nr_cpus)
  80. * SOME = min(nr_delayed_tasks / threads, 1)
  81. * FULL = (threads - min(nr_running_tasks, threads)) / threads
  82. *
  83. * For the 257 number crunchers on 256 CPUs, this yields:
  84. *
  85. * threads = min(257, 256)
  86. * SOME = min(1 / 256, 1) = 0.4%
  87. * FULL = (256 - min(257, 256)) / 256 = 0%
  88. *
  89. * For the 1 out of 4 memory-delayed tasks, this yields:
  90. *
  91. * threads = min(4, 4)
  92. * SOME = min(1 / 4, 1) = 25%
  93. * FULL = (4 - min(3, 4)) / 4 = 25%
  94. *
  95. * [ Substitute nr_cpus with 1, and you can see that it's a natural
  96. * extension of the single-CPU model. ]
  97. *
  98. * Implementation
  99. *
  100. * To assess the precise time spent in each such state, we would have
  101. * to freeze the system on task changes and start/stop the state
  102. * clocks accordingly. Obviously that doesn't scale in practice.
  103. *
  104. * Because the scheduler aims to distribute the compute load evenly
  105. * among the available CPUs, we can track task state locally to each
  106. * CPU and, at much lower frequency, extrapolate the global state for
  107. * the cumulative stall times and the running averages.
  108. *
  109. * For each runqueue, we track:
  110. *
  111. * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
  112. * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
  113. * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
  114. *
  115. * and then periodically aggregate:
  116. *
  117. * tNONIDLE = sum(tNONIDLE[i])
  118. *
  119. * tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
  120. * tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
  121. *
  122. * %SOME = tSOME / period
  123. * %FULL = tFULL / period
  124. *
  125. * This gives us an approximation of pressure that is practical
  126. * cost-wise, yet way more sensitive and accurate than periodic
  127. * sampling of the aggregate task states would be.
  128. */
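/*
 * Illustrative sketch (not part of the kernel code): the SOME/FULL model
 * above written out as plain C, using floating point for clarity. The
 * function names here are hypothetical and exist only to make the
 * formulas concrete.
 *
 *	static double example_some(unsigned delayed, unsigned nonidle,
 *				   unsigned cpus)
 *	{
 *		unsigned threads = nonidle < cpus ? nonidle : cpus;
 *
 *		if (!threads)
 *			return 0.0;
 *		return delayed >= threads ? 1.0 : (double)delayed / threads;
 *	}
 *
 *	static double example_full(unsigned running, unsigned nonidle,
 *				   unsigned cpus)
 *	{
 *		unsigned threads = nonidle < cpus ? nonidle : cpus;
 *		unsigned productive = running < threads ? running : threads;
 *
 *		if (!threads)
 *			return 0.0;
 *		return (double)(threads - productive) / threads;
 *	}
 *
 * Fed with the examples above, example_some(1, 257, 256) is ~0.004 (0.4%)
 * and example_full(3, 4, 4) is 0.25 (25%).
 */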
  129. #include "../workqueue_internal.h"
  130. #include <linux/sched/loadavg.h>
  131. #include <linux/seq_file.h>
  132. #include <linux/proc_fs.h>
  133. #include <linux/seqlock.h>
  134. #include <linux/uaccess.h>
  135. #include <linux/cgroup.h>
  136. #include <linux/module.h>
  137. #include <linux/sched.h>
  138. #include <linux/ctype.h>
  139. #include <linux/file.h>
  140. #include <linux/poll.h>
  141. #include <linux/psi.h>
  142. #include "sched.h"
  143. #include <trace/hooks/psi.h>
  144. static int psi_bug __read_mostly;
  145. DEFINE_STATIC_KEY_FALSE(psi_disabled);
  146. DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
  147. #ifdef CONFIG_PSI_DEFAULT_DISABLED
  148. static bool psi_enable;
  149. #else
  150. static bool psi_enable = true;
  151. #endif
  152. static int __init setup_psi(char *str)
  153. {
  154. return kstrtobool(str, &psi_enable) == 0;
  155. }
  156. __setup("psi=", setup_psi);
  157. /* Running averages - we need to be higher-res than loadavg */
  158. #define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
  159. #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
  160. #define EXP_60s 1981 /* 1/exp(2s/60s) */
  161. #define EXP_300s 2034 /* 1/exp(2s/300s) */
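/*
 * The EXP_* constants above are 1/exp(PSI_FREQ/avg_period) expressed in
 * the same 11-bit fixed point the load average code uses (FIXED_1 ==
 * 1 << 11 == 2048). A quick sanity check of the values:
 *
 *	2048 / e^(2/10)  ~= 2048 / 1.2214 ~= 1677	(EXP_10s)
 *	2048 / e^(2/60)  ~= 2048 / 1.0339 ~= 1981	(EXP_60s)
 *	2048 / e^(2/300) ~= 2048 / 1.0067 ~= 2034	(EXP_300s)
 */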
  162. /* PSI trigger definitions */
  163. #define WINDOW_MIN_US 500000 /* Min window size is 500ms */
  164. #define WINDOW_MAX_US 10000000 /* Max window size is 10s */
  165. #define UPDATES_PER_WINDOW 10 /* 10 updates per window */
  166. /* Sampling frequency in nanoseconds */
  167. static u64 psi_period __read_mostly;
  168. /* System-level pressure and stall tracking */
  169. static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
  170. struct psi_group psi_system = {
  171. .pcpu = &system_group_pcpu,
  172. };
  173. static void psi_avgs_work(struct work_struct *work);
  174. static void poll_timer_fn(struct timer_list *t);
  175. static void group_init(struct psi_group *group)
  176. {
  177. int cpu;
  178. for_each_possible_cpu(cpu)
  179. seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
  180. group->avg_last_update = sched_clock();
  181. group->avg_next_update = group->avg_last_update + psi_period;
  182. INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
  183. mutex_init(&group->avgs_lock);
  184. /* Init trigger-related members */
  185. atomic_set(&group->poll_scheduled, 0);
  186. mutex_init(&group->trigger_lock);
  187. INIT_LIST_HEAD(&group->triggers);
  188. memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
  189. group->poll_states = 0;
  190. group->poll_min_period = U32_MAX;
  191. memset(group->polling_total, 0, sizeof(group->polling_total));
  192. group->polling_next_update = ULLONG_MAX;
  193. group->polling_until = 0;
  194. init_waitqueue_head(&group->poll_wait);
  195. timer_setup(&group->poll_timer, poll_timer_fn, 0);
  196. rcu_assign_pointer(group->poll_task, NULL);
  197. }
  198. void __init psi_init(void)
  199. {
  200. if (!psi_enable) {
  201. static_branch_enable(&psi_disabled);
  202. return;
  203. }
  204. if (!cgroup_psi_enabled())
  205. static_branch_disable(&psi_cgroups_enabled);
  206. psi_period = jiffies_to_nsecs(PSI_FREQ);
  207. group_init(&psi_system);
  208. }
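/*
 * Map the per-cpu task counts onto the SOME/FULL states defined in the
 * model above. Note that CPU SOME means there is at least one runnable
 * task beyond the one currently on the CPU, i.e. somebody is waiting
 * for CPU time.
 */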
  209. static bool test_state(unsigned int *tasks, enum psi_states state)
  210. {
  211. switch (state) {
  212. case PSI_IO_SOME:
  213. return tasks[NR_IOWAIT];
  214. case PSI_IO_FULL:
  215. return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
  216. case PSI_MEM_SOME:
  217. return tasks[NR_MEMSTALL];
  218. case PSI_MEM_FULL:
  219. return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
  220. case PSI_CPU_SOME:
  221. return tasks[NR_RUNNING] > tasks[NR_ONCPU];
  222. case PSI_NONIDLE:
  223. return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
  224. tasks[NR_RUNNING];
  225. default:
  226. return false;
  227. }
  228. }
  229. static void get_recent_times(struct psi_group *group, int cpu,
  230. enum psi_aggregators aggregator, u32 *times,
  231. u32 *pchanged_states)
  232. {
  233. struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
  234. u64 now, state_start;
  235. enum psi_states s;
  236. unsigned int seq;
  237. u32 state_mask;
  238. *pchanged_states = 0;
  239. /* Snapshot a coherent view of the CPU state */
  240. do {
  241. seq = read_seqcount_begin(&groupc->seq);
  242. now = cpu_clock(cpu);
  243. memcpy(times, groupc->times, sizeof(groupc->times));
  244. state_mask = groupc->state_mask;
  245. state_start = groupc->state_start;
  246. } while (read_seqcount_retry(&groupc->seq, seq));
  247. /* Calculate state time deltas against the previous snapshot */
  248. for (s = 0; s < NR_PSI_STATES; s++) {
  249. u32 delta;
  250. /*
  251. * In addition to already concluded states, we also
  252. * incorporate currently active states on the CPU,
  253. * since states may last for many sampling periods.
  254. *
  255. * This way we keep our delta sampling buckets small
  256. * (u32) and our reported pressure close to what's
  257. * actually happening.
  258. */
  259. if (state_mask & (1 << s))
  260. times[s] += now - state_start;
  261. delta = times[s] - groupc->times_prev[aggregator][s];
  262. groupc->times_prev[aggregator][s] = times[s];
  263. times[s] = delta;
  264. if (delta)
  265. *pchanged_states |= (1 << s);
  266. }
  267. }
  268. static void calc_avgs(unsigned long avg[3], int missed_periods,
  269. u64 time, u64 period)
  270. {
  271. unsigned long pct;
  272. /* Fill in zeroes for periods of no activity */
  273. if (missed_periods) {
  274. avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
  275. avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
  276. avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
  277. }
  278. /* Sample the most recent active period */
  279. pct = div_u64(time * 100, period);
  280. pct *= FIXED_1;
  281. avg[0] = calc_load(avg[0], EXP_10s, pct);
  282. avg[1] = calc_load(avg[1], EXP_60s, pct);
  283. avg[2] = calc_load(avg[2], EXP_300s, pct);
  284. }
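/*
 * calc_load() implements the usual exponentially weighted moving average
 * in FIXED_1 fixed point, roughly:
 *
 *	avg' = (avg * EXP + pct * (FIXED_1 - EXP)) / FIXED_1
 *
 * so with pct scaled by FIXED_1 above, the resulting averages stay in the
 * fixed-point format that LOAD_INT()/LOAD_FRAC() expect when reporting.
 */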
  285. static void collect_percpu_times(struct psi_group *group,
  286. enum psi_aggregators aggregator,
  287. u32 *pchanged_states)
  288. {
  289. u64 deltas[NR_PSI_STATES - 1] = { 0, };
  290. unsigned long nonidle_total = 0;
  291. u32 changed_states = 0;
  292. int cpu;
  293. int s;
  294. /*
  295. * Collect the per-cpu time buckets and average them into a
  296. * single time sample that is normalized to wallclock time.
  297. *
  298. * For averaging, each CPU is weighted by its non-idle time in
  299. * the sampling period. This eliminates artifacts from uneven
  300. * loading, or even entirely idle CPUs.
  301. */
  302. for_each_possible_cpu(cpu) {
  303. u32 times[NR_PSI_STATES];
  304. u32 nonidle;
  305. u32 cpu_changed_states;
  306. get_recent_times(group, cpu, aggregator, times,
  307. &cpu_changed_states);
  308. changed_states |= cpu_changed_states;
  309. nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
  310. nonidle_total += nonidle;
  311. for (s = 0; s < PSI_NONIDLE; s++)
  312. deltas[s] += (u64)times[s] * nonidle;
  313. }
  314. /*
  315. * Integrate the sample into the running statistics that are
  316. * reported to userspace: the cumulative stall times and the
  317. * decaying averages.
  318. *
  319. * Pressure percentages are sampled at PSI_FREQ. We might be
  320. * called more often when the user polls more frequently than
  321. * that; we might be called less often when there is no task
  322. * activity, thus no data, and clock ticks are sporadic. The
  323. * below handles both.
  324. */
  325. /* total= */
  326. for (s = 0; s < NR_PSI_STATES - 1; s++)
  327. group->total[aggregator][s] +=
  328. div_u64(deltas[s], max(nonidle_total, 1UL));
  329. if (pchanged_states)
  330. *pchanged_states = changed_states;
  331. }
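/*
 * Worked example of the non-idle weighting above (in wall-clock terms):
 * if CPU0 was non-idle for the whole 2s period with 1s of MEM SOME, and
 * CPU1 was non-idle for only 0.5s with 0.5s of MEM SOME, then
 *
 *	tSOME = (1s * 2s + 0.5s * 0.5s) / (2s + 0.5s) = 0.9s
 *
 * i.e. the mostly idle CPU1 contributes proportionally less to the
 * aggregate than a naive average of the two per-cpu percentages would
 * suggest.
 */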
  332. static u64 update_averages(struct psi_group *group, u64 now)
  333. {
  334. unsigned long missed_periods = 0;
  335. u64 expires, period;
  336. u64 avg_next_update;
  337. int s;
  338. /* avgX= */
  339. expires = group->avg_next_update;
  340. if (now - expires >= psi_period)
  341. missed_periods = div_u64(now - expires, psi_period);
  342. /*
  343. * The periodic clock tick can get delayed for various
  344. * reasons, especially on loaded systems. To avoid clock
  345. * drift, we schedule the clock in fixed psi_period intervals.
  346. * But the deltas we sample out of the per-cpu buckets above
  347. * are based on the actual time elapsing between clock ticks.
  348. */
  349. avg_next_update = expires + ((1 + missed_periods) * psi_period);
  350. period = now - (group->avg_last_update + (missed_periods * psi_period));
  351. group->avg_last_update = now;
  352. for (s = 0; s < NR_PSI_STATES - 1; s++) {
  353. u32 sample;
  354. sample = group->total[PSI_AVGS][s] - group->avg_total[s];
  355. /*
  356. * Due to the lockless sampling of the time buckets,
  357. * recorded time deltas can slip into the next period,
  358. * which under full pressure can result in samples in
  359. * excess of the period length.
  360. *
  361. * We don't want to report non-sensical pressures in
  362. * excess of 100%, nor do we want to drop such events
  363. * on the floor. Instead we punt any overage into the
  364. * future until pressure subsides. By doing this we
  365. * don't underreport the occurring pressure curve, we
  366. * just report it delayed by one period length.
  367. *
  368. * The error isn't cumulative. As soon as another
  369. * delta slips from a period P to P+1, by definition
  370. * it frees up its time T in P.
  371. */
  372. if (sample > period)
  373. sample = period;
  374. group->avg_total[s] += sample;
  375. calc_avgs(group->avg[s], missed_periods, sample, period);
  376. }
  377. return avg_next_update;
  378. }
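/*
 * Numeric example of the clamping above: with a 2s period, if 2.3s of
 * stall time is observed in period P because a per-cpu delta slipped in
 * late, only 2s is reported in P; the remaining 0.3s is carried into
 * P+1, where the same delta is correspondingly missing. Reported
 * pressure never exceeds 100%, and no stall time is dropped.
 */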
  379. static void psi_avgs_work(struct work_struct *work)
  380. {
  381. struct delayed_work *dwork;
  382. struct psi_group *group;
  383. u32 changed_states;
  384. bool nonidle;
  385. u64 now;
  386. dwork = to_delayed_work(work);
  387. group = container_of(dwork, struct psi_group, avgs_work);
  388. mutex_lock(&group->avgs_lock);
  389. now = sched_clock();
  390. collect_percpu_times(group, PSI_AVGS, &changed_states);
  391. nonidle = changed_states & (1 << PSI_NONIDLE);
  392. /*
  393. * If there is task activity, periodically fold the per-cpu
  394. * times and feed samples into the running averages. If things
  395. * are idle and there is no data to process, stop the clock.
  396. * Once restarted, we'll catch up the running averages in one
  397. * go - see calc_avgs() and missed_periods.
  398. */
  399. if (now >= group->avg_next_update)
  400. group->avg_next_update = update_averages(group, now);
  401. if (nonidle) {
  402. schedule_delayed_work(dwork, nsecs_to_jiffies(
  403. group->avg_next_update - now) + 1);
  404. }
  405. mutex_unlock(&group->avgs_lock);
  406. }
  407. /* Trigger tracking window manipulations */
  408. static void window_reset(struct psi_window *win, u64 now, u64 value,
  409. u64 prev_growth)
  410. {
  411. win->start_time = now;
  412. win->start_value = value;
  413. win->prev_growth = prev_growth;
  414. }
  415. /*
  416. * PSI growth tracking window update and growth calculation routine.
  417. *
  418. * This approximates a sliding tracking window by interpolating
  419. * partially elapsed windows using historical growth data from the
  420. * previous intervals. This minimizes memory requirements (by not storing
  421. * all the intermediate values in the previous window) and simplifies
  422. * the calculations. It works well because the PSI signal changes only in
  423. * the positive direction, and over relatively small window sizes the growth
  424. * is close to linear.
  425. */
  426. static u64 window_update(struct psi_window *win, u64 now, u64 value)
  427. {
  428. u64 elapsed;
  429. u64 growth;
  430. elapsed = now - win->start_time;
  431. growth = value - win->start_value;
  432. /*
  433. * After each tracking window passes win->start_value and
  434. * win->start_time get reset and win->prev_growth stores
  435. * the average per-window growth of the previous window.
  436. * win->prev_growth is then used to interpolate additional
  437. * growth from the previous window assuming it was linear.
  438. */
  439. if (elapsed > win->size)
  440. window_reset(win, now, value, growth);
  441. else {
  442. u32 remaining;
  443. remaining = win->size - elapsed;
  444. growth += div64_u64(win->prev_growth * remaining, win->size);
  445. }
  446. return growth;
  447. }
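/*
 * Worked example: with a 1s window, suppose 600ms have elapsed since the
 * last window reset, 30ms of new stall has accumulated, and the previous
 * window saw 50ms of growth. The remaining 400ms are interpolated from
 * the previous window's rate:
 *
 *	growth = 30ms + 50ms * 400ms / 1000ms = 50ms
 *
 * and that value is what gets compared against the trigger threshold.
 */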
  448. static void init_triggers(struct psi_group *group, u64 now)
  449. {
  450. struct psi_trigger *t;
  451. list_for_each_entry(t, &group->triggers, node)
  452. window_reset(&t->win, now,
  453. group->total[PSI_POLL][t->state], 0);
  454. memcpy(group->polling_total, group->total[PSI_POLL],
  455. sizeof(group->polling_total));
  456. group->polling_next_update = now + group->poll_min_period;
  457. }
  458. static u64 update_triggers(struct psi_group *group, u64 now)
  459. {
  460. struct psi_trigger *t;
  461. bool new_stall = false;
  462. u64 *total = group->total[PSI_POLL];
  463. /*
  464. * On subsequent updates, calculate growth deltas and let
  465. * watchers know when their specified thresholds are exceeded.
  466. */
  467. list_for_each_entry(t, &group->triggers, node) {
  468. u64 growth;
  469. /* Check for stall activity */
  470. if (group->polling_total[t->state] == total[t->state])
  471. continue;
  472. /*
  473. * Multiple triggers might be looking at the same state,
  474. * remember to update group->polling_total[] once we've
  475. * been through all of them. Also remember to extend the
  476. * polling time if we see new stall activity.
  477. */
  478. new_stall = true;
  479. /* Calculate growth since last update */
  480. growth = window_update(&t->win, now, total[t->state]);
  481. if (growth < t->threshold)
  482. continue;
  483. /* Limit event signaling to once per window */
  484. if (now < t->last_event_time + t->win.size)
  485. continue;
  486. trace_android_vh_psi_event(t);
  487. /* Generate an event */
  488. if (cmpxchg(&t->event, 0, 1) == 0)
  489. wake_up_interruptible(&t->event_wait);
  490. t->last_event_time = now;
  491. }
  492. trace_android_vh_psi_group(group);
  493. if (new_stall)
  494. memcpy(group->polling_total, total,
  495. sizeof(group->polling_total));
  496. return now + group->poll_min_period;
  497. }
  498. /* Schedule polling if it's not already scheduled or forced. */
  499. static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
  500. bool force)
  501. {
  502. struct task_struct *task;
  503. /*
  504. * atomic_xchg should be called even when !force to provide a
  505. * full memory barrier (see the comment inside psi_poll_work).
  506. */
  507. if (atomic_xchg(&group->poll_scheduled, 1) && !force)
  508. return;
  509. rcu_read_lock();
  510. task = rcu_dereference(group->poll_task);
  511. /*
  512. * kworker might be NULL in case psi_trigger_destroy races with
  513. * psi_task_change (hotpath) which can't use locks
  514. */
  515. if (likely(task))
  516. mod_timer(&group->poll_timer, jiffies + delay);
  517. else
  518. atomic_set(&group->poll_scheduled, 0);
  519. rcu_read_unlock();
  520. }
  521. static void psi_poll_work(struct psi_group *group)
  522. {
  523. bool force_reschedule = false;
  524. u32 changed_states;
  525. u64 now;
  526. mutex_lock(&group->trigger_lock);
  527. now = sched_clock();
  528. if (now > group->polling_until) {
  529. /*
  530. * We are either about to start or might stop polling if no
  531. * state change was recorded. Resetting poll_scheduled leaves
  532. * a small window for psi_group_change to sneak in and schedule
  533. * an immediate poll_work before we get to rescheduling. One
  534. * potential extra wakeup at the end of the polling window
  535. * should be negligible and polling_next_update still keeps
  536. * updates correctly on schedule.
  537. */
  538. atomic_set(&group->poll_scheduled, 0);
  539. /*
  540. * A task change can race with the poll worker that is supposed to
  541. * report on it. To avoid missing events, ensure ordering between
  542. * poll_scheduled and the task state accesses, such that if the poll
  543. * worker misses the state update, the task change is guaranteed to
  544. * reschedule the poll worker:
  545. *
  546. * poll worker:
  547. * atomic_set(poll_scheduled, 0)
  548. * smp_mb()
  549. * LOAD states
  550. *
  551. * task change:
  552. * STORE states
  553. * if atomic_xchg(poll_scheduled, 1) == 0:
  554. * schedule poll worker
  555. *
  556. * The atomic_xchg() implies a full barrier.
  557. */
  558. smp_mb();
  559. } else {
  560. /* Polling window is not over, keep rescheduling */
  561. force_reschedule = true;
  562. }
  563. collect_percpu_times(group, PSI_POLL, &changed_states);
  564. if (changed_states & group->poll_states) {
  565. /* Initialize trigger windows when entering polling mode */
  566. if (now > group->polling_until)
  567. init_triggers(group, now);
  568. /*
  569. * Keep the monitor active for at least the duration of the
  570. * minimum tracking window as long as monitor states are
  571. * changing.
  572. */
  573. group->polling_until = now +
  574. group->poll_min_period * UPDATES_PER_WINDOW;
  575. }
  576. if (now > group->polling_until) {
  577. group->polling_next_update = ULLONG_MAX;
  578. goto out;
  579. }
  580. if (now >= group->polling_next_update)
  581. group->polling_next_update = update_triggers(group, now);
  582. psi_schedule_poll_work(group,
  583. nsecs_to_jiffies(group->polling_next_update - now) + 1,
  584. force_reschedule);
  585. out:
  586. mutex_unlock(&group->trigger_lock);
  587. }
  588. static int psi_poll_worker(void *data)
  589. {
  590. struct psi_group *group = (struct psi_group *)data;
  591. sched_set_fifo_low(current);
  592. while (true) {
  593. wait_event_interruptible(group->poll_wait,
  594. atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
  595. kthread_should_stop());
  596. if (kthread_should_stop())
  597. break;
  598. psi_poll_work(group);
  599. }
  600. return 0;
  601. }
  602. static void poll_timer_fn(struct timer_list *t)
  603. {
  604. struct psi_group *group = from_timer(group, t, poll_timer);
  605. atomic_set(&group->poll_wakeup, 1);
  606. wake_up_interruptible(&group->poll_wait);
  607. }
  608. static void record_times(struct psi_group_cpu *groupc, int cpu,
  609. bool memstall_tick)
  610. {
  611. u32 delta;
  612. u64 now;
  613. now = cpu_clock(cpu);
  614. delta = now - groupc->state_start;
  615. groupc->state_start = now;
  616. if (groupc->state_mask & (1 << PSI_IO_SOME)) {
  617. groupc->times[PSI_IO_SOME] += delta;
  618. if (groupc->state_mask & (1 << PSI_IO_FULL))
  619. groupc->times[PSI_IO_FULL] += delta;
  620. }
  621. if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
  622. groupc->times[PSI_MEM_SOME] += delta;
  623. if (groupc->state_mask & (1 << PSI_MEM_FULL))
  624. groupc->times[PSI_MEM_FULL] += delta;
  625. else if (memstall_tick) {
  626. u32 sample;
  627. /*
  628. * Since we care about lost potential, a
  629. * memstall is FULL when there are no other
  630. * working tasks, but also when the CPU is
  631. * actively reclaiming and nothing productive
  632. * could run even if it were runnable.
  633. *
  634. * When the timer tick sees a reclaiming CPU,
  635. * regardless of runnable tasks, sample a FULL
  636. * tick (or less if it hasn't been a full tick
  637. * since the last state change).
  638. */
  639. sample = min(delta, (u32)jiffies_to_nsecs(1));
  640. groupc->times[PSI_MEM_FULL] += sample;
  641. }
  642. }
  643. if (groupc->state_mask & (1 << PSI_CPU_SOME))
  644. groupc->times[PSI_CPU_SOME] += delta;
  645. if (groupc->state_mask & (1 << PSI_NONIDLE))
  646. groupc->times[PSI_NONIDLE] += delta;
  647. }
  648. static void psi_group_change(struct psi_group *group, int cpu,
  649. unsigned int clear, unsigned int set,
  650. bool wake_clock)
  651. {
  652. struct psi_group_cpu *groupc;
  653. u32 state_mask = 0;
  654. unsigned int t, m;
  655. enum psi_states s;
  656. groupc = per_cpu_ptr(group->pcpu, cpu);
  657. /*
  658. * First we assess the aggregate resource states this CPU's
  659. * tasks have been in since the last change, and account any
  660. * SOME and FULL time these may have resulted in.
  661. *
  662. * Then we update the task counts according to the state
  663. * change requested through the @clear and @set bits.
  664. */
  665. write_seqcount_begin(&groupc->seq);
  666. record_times(groupc, cpu, false);
  667. for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
  668. if (!(m & (1 << t)))
  669. continue;
  670. if (groupc->tasks[t]) {
  671. groupc->tasks[t]--;
  672. } else if (!psi_bug) {
  673. printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
  674. cpu, t, groupc->tasks[0],
  675. groupc->tasks[1], groupc->tasks[2],
  676. groupc->tasks[3], clear, set);
  677. psi_bug = 1;
  678. }
  679. }
  680. for (t = 0; set; set &= ~(1 << t), t++)
  681. if (set & (1 << t))
  682. groupc->tasks[t]++;
  683. /* Calculate state mask representing active states */
  684. for (s = 0; s < NR_PSI_STATES; s++) {
  685. if (test_state(groupc->tasks, s))
  686. state_mask |= (1 << s);
  687. }
  688. groupc->state_mask = state_mask;
  689. write_seqcount_end(&groupc->seq);
  690. if (state_mask & group->poll_states)
  691. psi_schedule_poll_work(group, 1, false);
  692. if (wake_clock && !delayed_work_pending(&group->avgs_work))
  693. schedule_delayed_work(&group->avgs_work, PSI_FREQ);
  694. }
  695. static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
  696. {
  697. if (*iter == &psi_system)
  698. return NULL;
  699. #ifdef CONFIG_CGROUPS
  700. if (static_branch_likely(&psi_cgroups_enabled)) {
  701. struct cgroup *cgroup = NULL;
  702. if (!*iter)
  703. cgroup = task->cgroups->dfl_cgrp;
  704. else
  705. cgroup = cgroup_parent(*iter);
  706. if (cgroup && cgroup_parent(cgroup)) {
  707. *iter = cgroup;
  708. return cgroup_psi(cgroup);
  709. }
  710. }
  711. #endif
  712. *iter = &psi_system;
  713. return &psi_system;
  714. }
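/*
 * Iteration order, illustrated: for a task in the cgroup /ancestor/child
 * on the default hierarchy, successive calls yield the psi_group of
 * "child", then "ancestor", and finally psi_system; the root cgroup is
 * skipped in favour of the system-wide psi_system group.
 */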
  715. static void psi_flags_change(struct task_struct *task, int clear, int set)
  716. {
  717. if (((task->psi_flags & set) ||
  718. (task->psi_flags & clear) != clear) &&
  719. !psi_bug) {
  720. printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
  721. task->pid, task->comm, task_cpu(task),
  722. task->psi_flags, clear, set);
  723. psi_bug = 1;
  724. }
  725. task->psi_flags &= ~clear;
  726. task->psi_flags |= set;
  727. }
  728. void psi_task_change(struct task_struct *task, int clear, int set)
  729. {
  730. int cpu = task_cpu(task);
  731. struct psi_group *group;
  732. bool wake_clock = true;
  733. void *iter = NULL;
  734. if (!task->pid)
  735. return;
  736. psi_flags_change(task, clear, set);
  737. /*
  738. * Periodic aggregation shuts off if there is a period of no
  739. * task changes, so we wake it back up if necessary. However,
  740. * don't do this if the task change is the aggregation worker
  741. * itself going to sleep, or we'll ping-pong forever.
  742. */
  743. if (unlikely((clear & TSK_RUNNING) &&
  744. (task->flags & PF_WQ_WORKER) &&
  745. wq_worker_last_func(task) == psi_avgs_work))
  746. wake_clock = false;
  747. while ((group = iterate_groups(task, &iter)))
  748. psi_group_change(group, cpu, clear, set, wake_clock);
  749. }
  750. void psi_task_switch(struct task_struct *prev, struct task_struct *next,
  751. bool sleep)
  752. {
  753. struct psi_group *group, *common = NULL;
  754. int cpu = task_cpu(prev);
  755. void *iter;
  756. if (next->pid) {
  757. psi_flags_change(next, 0, TSK_ONCPU);
  758. /*
  759. * When moving state between tasks, the group that
  760. * contains them both does not change: we can stop
  761. * updating the tree once we reach the first common
  762. * ancestor. Iterate @next's ancestors until we
  763. * encounter @prev's state.
  764. */
  765. iter = NULL;
  766. while ((group = iterate_groups(next, &iter))) {
  767. if (per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
  768. common = group;
  769. break;
  770. }
  771. psi_group_change(group, cpu, 0, TSK_ONCPU, true);
  772. }
  773. }
  774. /*
  775. * If this is a voluntary sleep, dequeue will have taken care
  776. * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We
  777. * only need to deal with it during preemption.
  778. */
  779. if (sleep)
  780. return;
  781. if (prev->pid) {
  782. psi_flags_change(prev, TSK_ONCPU, 0);
  783. iter = NULL;
  784. while ((group = iterate_groups(prev, &iter)) && group != common)
  785. psi_group_change(group, cpu, TSK_ONCPU, 0, true);
  786. }
  787. }
  788. void psi_memstall_tick(struct task_struct *task, int cpu)
  789. {
  790. struct psi_group *group;
  791. void *iter = NULL;
  792. while ((group = iterate_groups(task, &iter))) {
  793. struct psi_group_cpu *groupc;
  794. groupc = per_cpu_ptr(group->pcpu, cpu);
  795. write_seqcount_begin(&groupc->seq);
  796. record_times(groupc, cpu, true);
  797. write_seqcount_end(&groupc->seq);
  798. }
  799. }
  800. /**
  801. * psi_memstall_enter - mark the beginning of a memory stall section
  802. * @flags: flags to handle nested sections
  803. *
  804. * Marks the calling task as being stalled due to a lack of memory,
  805. * such as waiting for a refault or performing reclaim.
  806. */
  807. void psi_memstall_enter(unsigned long *flags)
  808. {
  809. struct rq_flags rf;
  810. struct rq *rq;
  811. if (static_branch_likely(&psi_disabled))
  812. return;
  813. *flags = current->in_memstall;
  814. if (*flags)
  815. return;
  816. /*
  817. * in_memstall setting & accounting needs to be atomic wrt
  818. * changes to the task's scheduling state, otherwise we can
  819. * race with CPU migration.
  820. */
  821. rq = this_rq_lock_irq(&rf);
  822. current->in_memstall = 1;
  823. psi_task_change(current, 0, TSK_MEMSTALL);
  824. rq_unlock_irq(rq, &rf);
  825. }
  826. /**
  827. * psi_memstall_leave - mark the end of a memory stall section
  828. * @flags: flags to handle nested memdelay sections
  829. *
  830. * Marks the calling task as no longer stalled due to lack of memory.
  831. */
  832. void psi_memstall_leave(unsigned long *flags)
  833. {
  834. struct rq_flags rf;
  835. struct rq *rq;
  836. if (static_branch_likely(&psi_disabled))
  837. return;
  838. if (*flags)
  839. return;
  840. /*
  841. * in_memstall clearing & accounting needs to be atomic wrt
  842. * changes to the task's scheduling state, otherwise we could
  843. * race with CPU migration.
  844. */
  845. rq = this_rq_lock_irq(&rf);
  846. current->in_memstall = 0;
  847. psi_task_change(current, TSK_MEMSTALL, 0);
  848. rq_unlock_irq(rq, &rf);
  849. }
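/*
 * Typical usage of the pair above from reclaim and IO paths (sketch of
 * the pattern used by callers in mm/ and block/):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	// ... wait for a refault or perform direct reclaim ...
 *	psi_memstall_leave(&pflags);
 *
 * The flags word makes the section nestable: only the outermost
 * enter/leave pair actually toggles current->in_memstall.
 */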
  850. #ifdef CONFIG_CGROUPS
  851. int psi_cgroup_alloc(struct cgroup *cgroup)
  852. {
  853. if (static_branch_likely(&psi_disabled))
  854. return 0;
  855. cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
  856. if (!cgroup->psi.pcpu)
  857. return -ENOMEM;
  858. group_init(&cgroup->psi);
  859. return 0;
  860. }
  861. void psi_cgroup_free(struct cgroup *cgroup)
  862. {
  863. if (static_branch_likely(&psi_disabled))
  864. return;
  865. cancel_delayed_work_sync(&cgroup->psi.avgs_work);
  866. free_percpu(cgroup->psi.pcpu);
  867. /* All triggers must be removed by now */
  868. WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
  869. }
  870. /**
  871. * cgroup_move_task - move task to a different cgroup
  872. * @task: the task
  873. * @to: the target css_set
  874. *
  875. * Move task to a new cgroup and safely migrate its associated stall
  876. * state between the different groups.
  877. *
  878. * This function acquires the task's rq lock to lock out concurrent
  879. * changes to the task's scheduling state and - in case the task is
  880. * running - concurrent changes to its stall state.
  881. */
  882. void cgroup_move_task(struct task_struct *task, struct css_set *to)
  883. {
  884. unsigned int task_flags = 0;
  885. struct rq_flags rf;
  886. struct rq *rq;
  887. if (static_branch_likely(&psi_disabled)) {
  888. /*
  889. * Lame to do this here, but the scheduler cannot be locked
  890. * from the outside, so we move cgroups from inside sched/.
  891. */
  892. rcu_assign_pointer(task->cgroups, to);
  893. return;
  894. }
  895. rq = task_rq_lock(task, &rf);
  896. if (task_on_rq_queued(task)) {
  897. task_flags = TSK_RUNNING;
  898. if (task_current(rq, task))
  899. task_flags |= TSK_ONCPU;
  900. } else if (task->in_iowait)
  901. task_flags = TSK_IOWAIT;
  902. if (task->in_memstall)
  903. task_flags |= TSK_MEMSTALL;
  904. if (task_flags)
  905. psi_task_change(task, task_flags, 0);
  906. /* See comment above */
  907. rcu_assign_pointer(task->cgroups, to);
  908. if (task_flags)
  909. psi_task_change(task, 0, task_flags);
  910. task_rq_unlock(rq, task, &rf);
  911. }
  912. #endif /* CONFIG_CGROUPS */
  913. int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
  914. {
  915. int full;
  916. u64 now;
  917. if (static_branch_likely(&psi_disabled))
  918. return -EOPNOTSUPP;
  919. /* Update averages before reporting them */
  920. mutex_lock(&group->avgs_lock);
  921. now = sched_clock();
  922. collect_percpu_times(group, PSI_AVGS, NULL);
  923. if (now >= group->avg_next_update)
  924. group->avg_next_update = update_averages(group, now);
  925. mutex_unlock(&group->avgs_lock);
  926. for (full = 0; full < 2 - (res == PSI_CPU); full++) {
  927. unsigned long avg[3];
  928. u64 total;
  929. int w;
  930. for (w = 0; w < 3; w++)
  931. avg[w] = group->avg[res * 2 + full][w];
  932. total = div_u64(group->total[PSI_AVGS][res * 2 + full],
  933. NSEC_PER_USEC);
  934. seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
  935. full ? "full" : "some",
  936. LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
  937. LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
  938. LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
  939. total);
  940. }
  941. return 0;
  942. }
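/*
 * Example of the resulting /proc/pressure/<res> output (values are
 * illustrative):
 *
 *	some avg10=0.12 avg60=0.06 avg300=0.01 total=14832930
 *	full avg10=0.00 avg60=0.00 avg300=0.00 total=1291098
 *
 * The avg* fields are decaying percentages and total is cumulative stall
 * time in microseconds. /proc/pressure/cpu omits the "full" line, per
 * the res == PSI_CPU special case above.
 */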
  943. static int psi_io_show(struct seq_file *m, void *v)
  944. {
  945. return psi_show(m, &psi_system, PSI_IO);
  946. }
  947. static int psi_memory_show(struct seq_file *m, void *v)
  948. {
  949. return psi_show(m, &psi_system, PSI_MEM);
  950. }
  951. static int psi_cpu_show(struct seq_file *m, void *v)
  952. {
  953. return psi_show(m, &psi_system, PSI_CPU);
  954. }
  955. static int psi_io_open(struct inode *inode, struct file *file)
  956. {
  957. return single_open(file, psi_io_show, NULL);
  958. }
  959. static int psi_memory_open(struct inode *inode, struct file *file)
  960. {
  961. return single_open(file, psi_memory_show, NULL);
  962. }
  963. static int psi_cpu_open(struct inode *inode, struct file *file)
  964. {
  965. return single_open(file, psi_cpu_show, NULL);
  966. }
  967. struct psi_trigger *psi_trigger_create(struct psi_group *group,
  968. char *buf, size_t nbytes, enum psi_res res)
  969. {
  970. struct psi_trigger *t;
  971. enum psi_states state;
  972. u32 threshold_us;
  973. u32 window_us;
  974. if (static_branch_likely(&psi_disabled))
  975. return ERR_PTR(-EOPNOTSUPP);
  976. if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
  977. state = PSI_IO_SOME + res * 2;
  978. else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
  979. state = PSI_IO_FULL + res * 2;
  980. else
  981. return ERR_PTR(-EINVAL);
  982. if (state >= PSI_NONIDLE)
  983. return ERR_PTR(-EINVAL);
  984. if (window_us < WINDOW_MIN_US ||
  985. window_us > WINDOW_MAX_US)
  986. return ERR_PTR(-EINVAL);
  987. /* Check threshold */
  988. if (threshold_us == 0 || threshold_us > window_us)
  989. return ERR_PTR(-EINVAL);
  990. t = kmalloc(sizeof(*t), GFP_KERNEL);
  991. if (!t)
  992. return ERR_PTR(-ENOMEM);
  993. t->group = group;
  994. t->state = state;
  995. t->threshold = threshold_us * NSEC_PER_USEC;
  996. t->win.size = window_us * NSEC_PER_USEC;
  997. window_reset(&t->win, 0, 0, 0);
  998. t->event = 0;
  999. t->last_event_time = 0;
  1000. init_waitqueue_head(&t->event_wait);
  1001. mutex_lock(&group->trigger_lock);
  1002. if (!rcu_access_pointer(group->poll_task)) {
  1003. struct task_struct *task;
  1004. task = kthread_create(psi_poll_worker, group, "psimon");
  1005. if (IS_ERR(task)) {
  1006. kfree(t);
  1007. mutex_unlock(&group->trigger_lock);
  1008. return ERR_CAST(task);
  1009. }
  1010. atomic_set(&group->poll_wakeup, 0);
  1011. wake_up_process(task);
  1012. rcu_assign_pointer(group->poll_task, task);
  1013. }
  1014. list_add(&t->node, &group->triggers);
  1015. group->poll_min_period = min(group->poll_min_period,
  1016. div_u64(t->win.size, UPDATES_PER_WINDOW));
  1017. group->nr_triggers[t->state]++;
  1018. group->poll_states |= (1 << t->state);
  1019. mutex_unlock(&group->trigger_lock);
  1020. return t;
  1021. }
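/*
 * Sketch of the corresponding user-space side (illustrative, not part of
 * this file; needs <fcntl.h>, <poll.h>, <string.h>, <unistd.h>): arm a
 * trigger for 150ms of memory stall per 1s window and wait for events
 * with poll(2), as described in Documentation/accounting/psi.rst. The
 * handle_memory_pressure() handler is hypothetical.
 *
 *	const char trig[] = "some 150000 1000000";
 *	struct pollfd fds;
 *	int fd;
 *
 *	fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *	write(fd, trig, strlen(trig) + 1);
 *	fds.fd = fd;
 *	fds.events = POLLPRI;
 *	while (poll(&fds, 1, -1) >= 0) {
 *		if (fds.revents & POLLPRI)
 *			handle_memory_pressure();	// hypothetical handler
 *	}
 */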
  1022. void psi_trigger_destroy(struct psi_trigger *t)
  1023. {
  1024. struct psi_group *group;
  1025. struct task_struct *task_to_destroy = NULL;
  1026. /*
  1027. * We do not check psi_disabled since it might have been disabled after
  1028. * the trigger got created.
  1029. */
  1030. if (!t)
  1031. return;
  1032. group = t->group;
  1033. /*
  1034. * Wake up waiters to stop polling. Can happen if the cgroup is deleted
  1035. * from under a polling process.
  1036. */
  1037. wake_up_interruptible(&t->event_wait);
  1038. mutex_lock(&group->trigger_lock);
  1039. if (!list_empty(&t->node)) {
  1040. struct psi_trigger *tmp;
  1041. u64 period = ULLONG_MAX;
  1042. list_del(&t->node);
  1043. group->nr_triggers[t->state]--;
  1044. if (!group->nr_triggers[t->state])
  1045. group->poll_states &= ~(1 << t->state);
  1046. /* reset min update period for the remaining triggers */
  1047. list_for_each_entry(tmp, &group->triggers, node)
  1048. period = min(period, div_u64(tmp->win.size,
  1049. UPDATES_PER_WINDOW));
  1050. group->poll_min_period = period;
  1051. /* Destroy poll_task when the last trigger is destroyed */
  1052. if (group->poll_states == 0) {
  1053. group->polling_until = 0;
  1054. task_to_destroy = rcu_dereference_protected(
  1055. group->poll_task,
  1056. lockdep_is_held(&group->trigger_lock));
  1057. rcu_assign_pointer(group->poll_task, NULL);
  1058. del_timer(&group->poll_timer);
  1059. }
  1060. }
  1061. mutex_unlock(&group->trigger_lock);
  1062. /*
  1063. * Wait for psi_schedule_poll_work RCU to complete its read-side
  1064. * critical section before destroying the trigger and optionally the
  1065. * poll_task.
  1066. */
  1067. synchronize_rcu();
  1068. /*
  1069. * Stop kthread 'psimon' after releasing trigger_lock to prevent a
  1070. * deadlock while waiting for psi_poll_work to acquire trigger_lock
  1071. */
  1072. if (task_to_destroy) {
  1073. /*
  1074. * After the RCU grace period has expired, the worker
  1075. * can no longer be found through group->poll_task.
  1076. */
  1077. kthread_stop(task_to_destroy);
  1078. atomic_set(&group->poll_scheduled, 0);
  1079. }
  1080. kfree(t);
  1081. }
  1082. __poll_t psi_trigger_poll(void **trigger_ptr,
  1083. struct file *file, poll_table *wait)
  1084. {
  1085. __poll_t ret = DEFAULT_POLLMASK;
  1086. struct psi_trigger *t;
  1087. if (static_branch_likely(&psi_disabled))
  1088. return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
  1089. t = smp_load_acquire(trigger_ptr);
  1090. if (!t)
  1091. return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
  1092. poll_wait(file, &t->event_wait, wait);
  1093. if (cmpxchg(&t->event, 1, 0) == 1)
  1094. ret |= EPOLLPRI;
  1095. return ret;
  1096. }
  1097. static ssize_t psi_write(struct file *file, const char __user *user_buf,
  1098. size_t nbytes, enum psi_res res)
  1099. {
  1100. char buf[32];
  1101. size_t buf_size;
  1102. struct seq_file *seq;
  1103. struct psi_trigger *new;
  1104. if (static_branch_likely(&psi_disabled))
  1105. return -EOPNOTSUPP;
  1106. if (!nbytes)
  1107. return -EINVAL;
  1108. buf_size = min(nbytes, sizeof(buf));
  1109. if (copy_from_user(buf, user_buf, buf_size))
  1110. return -EFAULT;
  1111. buf[buf_size - 1] = '\0';
  1112. seq = file->private_data;
  1113. /* Take seq->lock to protect seq->private from concurrent writes */
  1114. mutex_lock(&seq->lock);
  1115. /* Allow only one trigger per file descriptor */
  1116. if (seq->private) {
  1117. mutex_unlock(&seq->lock);
  1118. return -EBUSY;
  1119. }
  1120. new = psi_trigger_create(&psi_system, buf, nbytes, res);
  1121. if (IS_ERR(new)) {
  1122. mutex_unlock(&seq->lock);
  1123. return PTR_ERR(new);
  1124. }
  1125. smp_store_release(&seq->private, new);
  1126. mutex_unlock(&seq->lock);
  1127. return nbytes;
  1128. }
  1129. static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
  1130. size_t nbytes, loff_t *ppos)
  1131. {
  1132. return psi_write(file, user_buf, nbytes, PSI_IO);
  1133. }
  1134. static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
  1135. size_t nbytes, loff_t *ppos)
  1136. {
  1137. return psi_write(file, user_buf, nbytes, PSI_MEM);
  1138. }
  1139. static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
  1140. size_t nbytes, loff_t *ppos)
  1141. {
  1142. return psi_write(file, user_buf, nbytes, PSI_CPU);
  1143. }
  1144. static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
  1145. {
  1146. struct seq_file *seq = file->private_data;
  1147. return psi_trigger_poll(&seq->private, file, wait);
  1148. }
  1149. static int psi_fop_release(struct inode *inode, struct file *file)
  1150. {
  1151. struct seq_file *seq = file->private_data;
  1152. psi_trigger_destroy(seq->private);
  1153. return single_release(inode, file);
  1154. }
  1155. static const struct proc_ops psi_io_proc_ops = {
  1156. .proc_open = psi_io_open,
  1157. .proc_read = seq_read,
  1158. .proc_lseek = seq_lseek,
  1159. .proc_write = psi_io_write,
  1160. .proc_poll = psi_fop_poll,
  1161. .proc_release = psi_fop_release,
  1162. };
  1163. static const struct proc_ops psi_memory_proc_ops = {
  1164. .proc_open = psi_memory_open,
  1165. .proc_read = seq_read,
  1166. .proc_lseek = seq_lseek,
  1167. .proc_write = psi_memory_write,
  1168. .proc_poll = psi_fop_poll,
  1169. .proc_release = psi_fop_release,
  1170. };
  1171. static const struct proc_ops psi_cpu_proc_ops = {
  1172. .proc_open = psi_cpu_open,
  1173. .proc_read = seq_read,
  1174. .proc_lseek = seq_lseek,
  1175. .proc_write = psi_cpu_write,
  1176. .proc_poll = psi_fop_poll,
  1177. .proc_release = psi_fop_release,
  1178. };
  1179. static int __init psi_proc_init(void)
  1180. {
  1181. if (psi_enable) {
  1182. proc_mkdir("pressure", NULL);
  1183. proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
  1184. proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
  1185. proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
  1186. }
  1187. return 0;
  1188. }
  1189. module_init(psi_proc_init);