dm-kcopyd.c

/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
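
/*
 * Typical use (illustrative sketch, not code from this file; copy_done,
 * kc, src, dest and ctx are example names):
 *
 *	static void copy_done(int read_err, unsigned long write_err, void *ctx)
 *	{
 *		// read_err is non-zero on a read failure; write_err is a
 *		// bitmask of the destinations that failed to write.
 *	}
 *
 *	kc = dm_kcopyd_client_create(NULL);
 *	dm_kcopyd_copy(kc, &src, 1, &dest, 0, copy_done, ctx);
 *	...
 *	dm_kcopyd_client_destroy(kc);
 */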
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm-core.h"

#define SPLIT_COUNT	8
#define MIN_JOBS	8
#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB 1024

static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;

module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
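
/*
 * MediaTek OTA quirk: when "mtk_kcopyd_quirk" is passed on the kernel
 * command line, pages for snapshot-merge jobs (DM_KCOPYD_SNAP_MERGE) are
 * handed out from the "mediatek,dm_ota" reserved-memory region instead of
 * the page allocator. The region is tracked with a simple per-page in-use
 * table protected by rsm_lock.
 */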
static bool rsm_enabled;
static phys_addr_t rsm_mem_base, rsm_mem_size;

#ifndef MODULE
static DEFINE_SPINLOCK(rsm_lock);
static int *rsm_mem;
static int rsm_page_cnt;
static int rsm_tbl_idx;
static struct reserved_mem *rmem;

static void __init kcopyd_rsm_init(void)
{
	static struct device_node *rsm_node;
	int ret = 0;

	if (!rsm_enabled)
		return;

	rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota");
	if (!rsm_node) {
		ret = -ENODEV;
		goto out;
	}

	rmem = of_reserved_mem_lookup(rsm_node);
	if (!rmem) {
		ret = -EINVAL;
		goto out_put_node;
	}

	rsm_mem_base = rmem->base;
	rsm_mem_size = rmem->size;
	rsm_page_cnt = rsm_mem_size / PAGE_SIZE;
	rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL);
	if (!rsm_mem)
		ret = -ENOMEM;

out_put_node:
	of_node_put(rsm_node);
out:
	if (ret)
		pr_warn("kcopyd: failed to init rsm: %d", ret);
}

static int __init kcopyd_rsm_enable(char *str)
{
	rsm_enabled = true;
	return 0;
}
early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);

static void kcopyd_rsm_get_page(struct page **p)
{
	int i;
	unsigned long flags;

	*p = NULL;
	spin_lock_irqsave(&rsm_lock, flags);
	for (i = 0 ; i < rsm_page_cnt ; i++) {
		rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1;
		if (rsm_mem[rsm_tbl_idx] == 0) {
			rsm_mem[rsm_tbl_idx] = 1;
			*p = virt_to_page(phys_to_virt(rsm_mem_base + PAGE_SIZE
						       * rsm_tbl_idx));
			break;
		}
	}
	spin_unlock_irqrestore(&rsm_lock, flags);
}

static void kcopyd_rsm_drop_page(struct page **p)
{
	u64 off;
	unsigned long flags;

	if (*p) {
		off = page_to_phys(*p) - rsm_mem_base;
		spin_lock_irqsave(&rsm_lock, flags);
		rsm_mem[off >> PAGE_SHIFT] = 0;
		spin_unlock_irqrestore(&rsm_lock, flags);
		*p = NULL;
	}
}

static void kcopyd_rsm_destroy(void)
{
	if (rsm_enabled)
		kfree(rsm_mem);
}

#else
#define kcopyd_rsm_destroy(...)
#define kcopyd_rsm_drop_page(...)
#define kcopyd_rsm_get_page(...)
#define kcopyd_rsm_init(...)
#endif
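
/*
 * The sub-job size used by a client, converted from the kilobyte module
 * parameter to 512-byte sectors (hence the << 1).
 */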
static unsigned dm_get_kcopyd_subjob_size(void)
{
	unsigned sub_job_size_kb;

	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
						DEFAULT_SUB_JOB_SIZE_KB,
						MAX_SUB_JOB_SIZE_KB);

	return sub_job_size_kb << 1;
}

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct page_list *pages;
	unsigned nr_reserved_pages;
	unsigned nr_free_pages;
	unsigned sub_job_size;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;

	mempool_t job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	struct dm_kcopyd_throttle *throttle;

	atomic_t nr_jobs;

	/*
	 * We maintain four lists of jobs:
	 *
	 * i)   jobs waiting for pages
	 * ii)  jobs that have pages, and are waiting for the io to be issued.
	 * iii) jobs that don't need to do any IO and just run a callback
	 * iv)  jobs that have completed.
	 *
	 * All four of these are protected by job_lock.
	 */
	spinlock_t job_lock;
	struct list_head callback_jobs;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};

static struct page_list zero_page_list;

static DEFINE_SPINLOCK(throttle_spinlock);

/*
 * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
 * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
 * by 2.
 */
#define ACCOUNT_INTERVAL_SHIFT	SHIFT_HZ

/*
 * Sleep this number of milliseconds.
 *
 * The value was decided experimentally.
 * Smaller values seem to cause an increased copy rate above the limit.
 * The reason for this is unknown but possibly due to jiffies rounding errors
 * or read/write cache inside the disk.
 */
#define SLEEP_MSEC	100

/*
 * Maximum number of sleep events. There is a theoretical livelock if more
 * kcopyd clients do work simultaneously which this limit avoids.
 */
#define MAX_SLEEPS	10
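
/*
 * Account the start of an I/O job. If the client's throttle is set below
 * 100%, sleep in SLEEP_MSEC steps (at most MAX_SLEEPS times) until the
 * measured I/O share falls back under the limit.
 */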
static void io_job_start(struct dm_kcopyd_throttle *t)
{
	unsigned throttle, now, difference;
	int slept = 0, skew;

	if (unlikely(!t))
		return;

try_again:
	spin_lock_irq(&throttle_spinlock);

	throttle = READ_ONCE(t->throttle);

	if (likely(throttle >= 100))
		goto skip_limit;

	now = jiffies;
	difference = now - t->last_jiffies;
	t->last_jiffies = now;
	if (t->num_io_jobs)
		t->io_period += difference;
	t->total_period += difference;

	/*
	 * Maintain sane values if we got a temporary overflow.
	 */
	if (unlikely(t->io_period > t->total_period))
		t->io_period = t->total_period;

	if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
		int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
		t->total_period >>= shift;
		t->io_period >>= shift;
	}

	skew = t->io_period - throttle * t->total_period / 100;

	if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
		slept++;
		spin_unlock_irq(&throttle_spinlock);
		msleep(SLEEP_MSEC);
		goto try_again;
	}

skip_limit:
	t->num_io_jobs++;

	spin_unlock_irq(&throttle_spinlock);
}

static void io_job_finish(struct dm_kcopyd_throttle *t)
{
	unsigned long flags;

	if (unlikely(!t))
		return;

	spin_lock_irqsave(&throttle_spinlock, flags);

	t->num_io_jobs--;

	if (likely(READ_ONCE(t->throttle) >= 100))
		goto skip_limit;

	if (!t->num_io_jobs) {
		unsigned now, difference;

		now = jiffies;
		difference = now - t->last_jiffies;
		t->last_jiffies = now;

		t->io_period += difference;
		t->total_period += difference;

		/*
		 * Maintain sane values if we got a temporary overflow.
		 */
		if (unlikely(t->io_period > t->total_period))
			t->io_period = t->total_period;
	}

skip_limit:
	spin_unlock_irqrestore(&throttle_spinlock, flags);
}

static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), gfp);
	if (!pl)
		return NULL;

	if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags)) {
		kcopyd_rsm_get_page(&pl->page);
	} else {
		pl->page = alloc_page(gfp);
	}

	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}
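
/*
 * Return a page to its origin: pages from the reserved rsm region go back
 * to the in-use table, anything else back to the page allocator.
 */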
static void free_pl(struct page_list *pl)
{
	struct page *p = pl->page;
	phys_addr_t pa = page_to_phys(p);

	if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size)
		kcopyd_rsm_drop_page(&pl->page);
	else
		__free_page(pl->page);
	kfree(pl);
}

/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);
		else {
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}
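
/*
 * Allocate 'nr' pages for a job, preferring freshly allocated pages and
 * falling back to the client's reserved pool. Returns -ENOMEM only when
 * both sources are exhausted.
 */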
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages,
			    unsigned long job_flags)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags);
		if (unlikely(!pl)) {
			/* Use reserved pages */
			pl = kc->pages;
			if (unlikely(!pl))
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);
	return -ENOMEM;
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL, 0);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);

	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_reserved_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
	sector_t write_offset;

	struct kcopyd_job *master_job;
};

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
	_job_cache = kmem_cache_create("kcopyd_job",
				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
				__alignof__(struct kcopyd_job), 0, NULL);
	if (!_job_cache)
		return -ENOMEM;

	zero_page_list.next = &zero_page_list;
	zero_page_list.page = ZERO_PAGE(0);

	kcopyd_rsm_init();

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
	kcopyd_rsm_destroy();
}

/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop_io_job(struct list_head *jobs,
				     struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job;

	/*
	 * For I/O jobs, pop any read, any write without sequential write
	 * constraint and sequential writes that are at the right position.
	 */
	list_for_each_entry(job, jobs, list) {
		if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
			list_del(&job->list);
			return job;
		}

		if (job->write_offset == job->master_job->write_offset) {
			job->master_job->write_offset += job->source.count;
			list_del(&job->list);
			return job;
		}
	}

	return NULL;
}

static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		if (jobs == &kc->io_jobs)
			job = pop_io_job(jobs, kc);
		else {
			job = list_entry(jobs->next, struct kcopyd_job, list);
			list_del(&job->list);
		}
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);
	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job) {
		mutex_destroy(&job->lock);
		mempool_free(job, &kc->job_pool);
	}
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	cond_resched();

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	io_job_finish(kc->throttle);

	if (error) {
		if (op_is_write(job->rw))
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (op_is_write(job->rw))
		push(&kc->complete_jobs, job);
	else {
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = job->rw,
		.bi_op_flags = 0,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	/*
	 * If we need to write sequentially and some reads or writes failed,
	 * no point in continuing.
	 */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    job->master_job->write_err) {
		job->write_err = job->master_job->write_err;
		return -EIO;
	}

	io_job_start(job->kc->throttle);

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}
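
/*
 * Allocate the pages a job needs for its I/O. On success the job is moved
 * to the io list and 0 is returned; 1 means the allocation must be retried
 * once pages have been freed.
 */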
static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {
		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (op_is_write(job->rw))
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			wake(kc);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;
	unsigned long flags;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	spin_lock_irqsave(&kc->job_lock, flags);
	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);

	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->callback_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}
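
/*
 * Completion callback for the sub-jobs of a split copy: each invocation
 * either dispatches the next chunk of the master job or, once the last
 * sub-job has finished, queues the master job's completion on the kcopyd
 * thread.
 */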
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > kc->sub_job_size)
				count = kc->sub_job_size;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->write_offset = progress;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {
		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}

/*
 * Create some sub jobs to share the work between them.
 */
static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);

	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, &master_job[i + 1]);
	}
}
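
/*
 * Submit a copy from 'from' to num_dests destinations. A NULL 'from'
 * zeroes the destinations instead (see dm_kcopyd_zero). Jobs larger than
 * the client's sub-job size are split into SPLIT_COUNT sub-jobs.
 */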
void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		    unsigned int num_dests, struct dm_io_region *dests,
		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;
	int i;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(&kc->job_pool, GFP_NOIO);
	mutex_init(&job->lock);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	/*
	 * If one of the destinations is a host-managed zoned block device,
	 * we need to write sequentially. If one of the destinations is a
	 * host-aware device, then leave it to the caller to choose what to do.
	 */
	if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
		for (i = 0; i < job->num_dests; i++) {
			if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
				set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
				break;
			}
		}
	}

	/*
	 * If we need to write sequentially, errors cannot be ignored.
	 */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
		clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);

	if (from) {
		job->source = *from;
		job->pages = NULL;
		job->rw = READ;
	} else {
		memset(&job->source, 0, sizeof job->source);
		job->source.count = job->dests[0].count;
		job->pages = &zero_page_list;

		/*
		 * Use WRITE ZEROES to optimize zeroing if all dests support it.
		 */
		job->rw = REQ_OP_WRITE_ZEROES;
		for (i = 0; i < job->num_dests; i++)
			if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
				job->rw = WRITE;
				break;
			}
	}

	job->fn = fn;
	job->context = context;
	job->master_job = job;
	job->write_offset = 0;

	if (job->source.count <= kc->sub_job_size)
		dispatch_job(job);
	else {
		job->progress = 0;
		split_job(job);
	}
}
EXPORT_SYMBOL(dm_kcopyd_copy);

void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
		    unsigned num_dests, struct dm_io_region *dests,
		    unsigned flags, dm_kcopyd_notify_fn fn, void *context)
{
	dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
EXPORT_SYMBOL(dm_kcopyd_zero);
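
/*
 * Allocate a job that performs no I/O; the caller completes it later with
 * dm_kcopyd_do_callback(), which queues the notification on the kcopyd
 * thread so that callbacks do not race with other completions.
 */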
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	job = mempool_alloc(&kc->job_pool, GFP_NOIO);

	memset(job, 0, sizeof(struct kcopyd_job));
	job->kc = kc;
	job->fn = fn;
	job->context = context;
	job->master_job = job;

	atomic_inc(&kc->nr_jobs);

	return job;
}
EXPORT_SYMBOL(dm_kcopyd_prepare_callback);

void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{
	struct kcopyd_job *job = j;
	struct dm_kcopyd_client *kc = job->kc;

	job->read_err = read_err;
	job->write_err = write_err;

	push(&kc->callback_jobs, job);
	wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);

/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
	int r;
	unsigned reserve_pages;
	struct dm_kcopyd_client *kc;

	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->callback_jobs);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);
	kc->throttle = throttle;

	r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache);
	if (r)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq) {
		r = -ENOMEM;
		goto bad_workqueue;
	}

	kc->sub_job_size = dm_get_kcopyd_subjob_size();
	reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);

	kc->pages = NULL;
	kc->nr_reserved_pages = kc->nr_free_pages = 0;
	r = client_reserve_pages(kc, reserve_pages);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create();
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	return kc;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_exit(&kc->job_pool);
bad_slab:
	kfree(kc);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->callback_jobs));
	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_exit(&kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);