// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

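/*
 * Worked example: with pd->cpumask.pcpu = {0,2,3} and seq_nr = 7, the
 * weight is 3, so cpu_index = 7 % 3 = 1 and padata_index_to_cpu() walks
 * one step past the first set bit: CPU 0 -> CPU 2.  Consecutive sequence
 * numbers thus round-robin across the parallel cpumask, which keeps the
 * per-CPU reorder lists roughly balanced.
 */
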
static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
			     void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	} else {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

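/*
 * Usage sketch (not part of this file; all names below are hypothetical):
 * a caller embeds struct padata_priv in its own request, submits it with
 * padata_do_parallel(), and finishes the parallel callback with
 * padata_do_serial() as the kernel-doc above requires.
 *
 *	struct my_request {
 *		struct padata_priv padata;	// must be embedded
 *		...				// caller's payload
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *					struct my_request, padata);
 *
 *		do_expensive_work(req);		// runs with BHs off
 *		padata_do_serial(padata);	// mandatory for every object
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// runs on cb_cpu, in submission order
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	if (err == -EBUSY)
 *		...				// instance resetting, retry later
 */
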
/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

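/*
 * Worked example of the reorder logic: with two parallel CPUs, objects
 * with seq_nr 0, 1, 2, 3 hash to CPUs A, B, A, B in turn.  Suppose
 * object 1 finishes first: padata_do_serial() queues it on B's reorder
 * list, but padata_find_next() returns NULL because pd->cpu still points
 * at A and A's list is empty.  Once object 0 completes, the loop above
 * drains 0 from A, then 1 from B, advancing pd->cpu round-robin, so the
 * serial callbacks always run in submission order.
 */
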
static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &reorder->list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / job->min_chunk, 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job	       = job;
	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}

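/*
 * Usage sketch (hypothetical caller): describe the range of work, the
 * per-chunk thread function, and the limits, then let padata split the job
 * up.  Note padata_do_multithreaded() is __init here, so callers run during
 * boot (deferred struct page init is the in-tree user of this interface).
 *
 *	static void __init init_range(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		...				// process items [start, end)
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn	= init_range,
 *		.fn_arg		= my_arg,	// hypothetical context pointer
 *		.start		= first_item,
 *		.size		= nr_items,
 *		.align		= 1,		// no alignment requirement
 *		.min_chunk	= 1024,		// smallest worthwhile unit
 *		.max_threads	= num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);
 *
 * For instance, with size = 1 << 20, min_chunk = 1024, and four threads
 * (all works available), chunk_size works out to 2^20 / (4 * 4) = 65536
 * items per thread_fn call.
 */
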
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	atomic_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (atomic_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

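/*
 * Usage sketch (hypothetical caller): build a cpumask and apply it as the
 * new parallel mask; padata_replace() swaps the old parallel_data out
 * under RCU.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpumask_of_node(0));	// e.g. restrict to node 0
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */
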
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr,  char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

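/*
 * Example from userspace, assuming the caller has added the instance's
 * kobject to sysfs (as pcrypt does beneath /sys/kernel/pcrypt/):
 *
 *	# cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	# echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * The written value is parsed by bitmap_parse(), so it is a hex mask;
 * "f" selects CPUs 0-3.
 */
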
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	get_online_cpus();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	put_online_cpus();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	put_online_cpus();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	get_online_cpus();
	pd = padata_alloc_pd(ps);
	put_online_cpus();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

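/*
 * Lifecycle sketch (hypothetical caller): an instance owns the workqueues
 * and cpumasks; each user of the instance allocates its own shell.
 * Teardown runs in reverse order, shells before the instance.
 *
 *	struct padata_instance *pinst = padata_alloc("my_inst");
 *	struct padata_shell *ps;
 *
 *	if (!pinst)
 *		return -ENOMEM;
 *	ps = padata_alloc_shell(pinst);
 *	if (!ps) {
 *		padata_free(pinst);
 *		return -ENOMEM;
 *	}
 *	...					// submit jobs via ps
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */
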
void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}