// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

static int alignment = -1;
module_param(alignment, int, 0644);
MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");

static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");

static bool polled;
module_param(polled, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
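
/*
 * Typical usage, for illustration only (the channel name dma0chan0 is a
 * placeholder; see Documentation/driver-api/dmaengine/dmatest.rst for the
 * authoritative walkthrough):
 *
 *	% modprobe dmatest timeout=2000 iterations=5 channel=dma0chan0 run=1
 *
 * or, with the module already loaded:
 *
 *	% echo dma0chan0 > /sys/module/dmatest/parameters/channel
 *	% echo 1 > /sys/module/dmatest/parameters/run
 */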

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @noverify:		disable data verification
 * @norandom:		disable random offset setup
 * @alignment:		custom data address alignment taken as 2^alignment
 * @transfer_size:	custom transfer size in bytes
 * @polled:		use polling for completion instead of interrupts
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
	bool		noverify;
	bool		norandom;
	int		alignment;
	unsigned int	transfer_size;
	bool		polled;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @channels:		channels under test
 * @nr_channels:	number of channels under test
 * @lock:		access protection to the fields of this structure
 * @did_init:		module has been initialized completely
 * @last_error:		test has faced configuration issues
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	int			last_error;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
static int dmatest_chan_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops multi_chan_ops = {
	.set = dmatest_chan_set,
	.get = dmatest_chan_get,
};

static char test_channel[20];
static struct kparam_string newchan_kps = {
	.string = test_channel,
	.maxlen = 20,
};
module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops test_list_ops = {
	.get = dmatest_test_list_get,
};
module_param_cb(test_list, &test_list_ops, NULL, 0444);
MODULE_PARM_DESC(test_list, "Print current test list");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01
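
/*
 * Worked example of the layout above (memcpy case, for illustration):
 * the source byte at index 5 inside the to-be-copied region is
 * PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK)
 * = 0x80 | 0x40 | 0x1a = 0xda, while the matching destination byte is
 * initialized to PATTERN_DST | PATTERN_OVERWRITE | 0x1a = 0x3a.
 */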

/* Fixed point arithmetic ops */
#define FIXPT_SHIFT		8
#define FIXPT_MASK		0xFF
#define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
#define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
#define FIXPT_GET_FRAC(a)	((((a) & FIXPT_MASK) * 100) >> FIXPT_SHIFT)
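
/*
 * For illustration: with an 8-bit fraction part, the fixed-point value
 * 0x280 decodes as FIXPT_TO_INT(0x280) = 2 and
 * FIXPT_GET_FRAC(0x280) = (0x80 * 100) >> 8 = 50, i.e. 2.50.
 */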

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

struct dmatest_data {
	u8		**raw;
	u8		**aligned;
	unsigned int	cnt;
	unsigned int	off;
};

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	struct dmatest_data	src;
	struct dmatest_data	dst;
	enum dma_transaction_type type;
	wait_queue_head_t	done_wait;
	struct dmatest_done	test_done;
	bool			done;
	bool			pending;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done && !thread->pending)
				return true;
		}
	}

	return false;
}

static bool is_threaded_test_pending(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (thread->pending)
				return true;
		}
	}

	return false;
}

static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !is_threaded_test_run(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;

	return ~val & PATTERN_COUNT_MASK;
}

static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}

static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_src_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
		for ( ; i < buf_size; i++)
			buf[i] = gen_src_value(i, is_memset);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_dst_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_dst_value(i, is_memset) |
						PATTERN_OVERWRITE;
		for ( ; i < buf_size; i++)
			buf[i] = gen_dst_value(i, is_memset);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | gen_inv_idx(counter, is_memset);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf, bool is_memset)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | gen_inv_idx(counter, is_memset);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf,
							 is_memset);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);
	if (!thread->done) {
		done->done = true;
		wake_up_all(done->wait);
	} else {
		/*
		 * If thread->done, it means that this callback occurred
		 * after the parent thread has cleaned up. This can
		 * happen in the case that the driver doesn't implement
		 * the terminate_all() functionality and a dma operation
		 * did not occur within the timeout period
		 */
		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
	}
}

static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	if (IS_ERR_VALUE(data)) {
		pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%ld)\n",
			current->comm, n, err, src_off, dst_off, len, data);
	} else {
		pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
			current->comm, n, err, src_off, dst_off, len, data);
	}
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	per_sec = INT_TO_FIXPT(per_sec);
	do_div(per_sec, runtime);

	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}

static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++)
		kfree(d->raw[i]);

	kfree(d->aligned);
	kfree(d->raw);
}

static void dmatest_free_test_data(struct dmatest_data *d)
{
	__dmatest_free_test_data(d, d->cnt);
}

static int dmatest_alloc_test_data(struct dmatest_data *d,
		unsigned int buf_size, u8 align)
{
	unsigned int i = 0;

	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->raw)
		return -ENOMEM;

	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->aligned)
		goto err;

	for (i = 0; i < d->cnt; i++) {
		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
		if (!d->raw[i])
			goto err;

		/* align to alignment restriction */
		if (align)
			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
		else
			d->aligned[i] = d->raw[i];
	}

	return 0;
err:
	__dmatest_free_test_data(d, i);
	return -ENOMEM;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	struct dmatest_thread *thread = data;
	struct dmatest_done *done = &thread->test_done;
	struct dmatest_info *info;
	struct dmatest_params *params;
	struct dma_chan *chan;
	struct dma_device *dev;
	unsigned int error_count;
	unsigned int failed_tests = 0;
	unsigned int total_tests = 0;
	dma_cookie_t cookie;
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *pq_coefs = NULL;
	int ret;
	unsigned int buf_size;
	struct dmatest_data *src;
	struct dmatest_data *dst;
	int i;
	ktime_t ktime, start, diff;
	ktime_t filltime = 0;
	ktime_t comparetime = 0;
	s64 runtime = 0;
	unsigned long long total_len = 0;
	unsigned long long iops = 0;
	u8 align = 0;
	bool is_memset = false;
	dma_addr_t *srcs;
	dma_addr_t *dma_pq;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	thread->pending = false;
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	src = &thread->src;
	dst = &thread->dst;
	if (thread->type == DMA_MEMCPY) {
		align = params->alignment < 0 ? dev->copy_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
	} else if (thread->type == DMA_MEMSET) {
		align = params->alignment < 0 ? dev->fill_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst->cnt = 1;
		align = params->alignment < 0 ? dev->xor_align :
						params->alignment;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst->cnt = 2;
		align = params->alignment < 0 ? dev->pq_align :
						params->alignment;

		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src->cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	/* Check if buffer count fits into map count variable (u8) */
	if ((src->cnt + dst->cnt) >= 255) {
		pr_err("too many buffers (%d of 255 supported)\n",
		       src->cnt + dst->cnt);
		goto err_free_coefs;
	}

	buf_size = params->buf_size;
	if (1 << align > buf_size) {
		pr_err("%u-byte buffer too small for %d-byte alignment\n",
		       buf_size, 1 << align);
		goto err_free_coefs;
	}

	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
		goto err_free_coefs;

	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
		goto err_src;

	set_user_nice(current, 10);

	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!srcs)
		goto err_dst;

	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!dma_pq)
		goto err_srcs_array;

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	if (params->polled)
		flags = DMA_CTRL_ACK;
	else
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!(kthread_should_stop() ||
	       (params->iterations && total_tests >= params->iterations))) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t *dsts;
		unsigned int len;

		total_tests++;

		if (params->transfer_size) {
			if (params->transfer_size >= buf_size) {
				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
				       params->transfer_size, buf_size);
				break;
			}
			len = params->transfer_size;
		} else if (params->norandom) {
			len = buf_size;
		} else {
			len = dmatest_random() % buf_size + 1;
		}

		/* Do not alter transfer size explicitly defined by user */
		if (!params->transfer_size) {
			len = (len >> align) << align;
			if (!len)
				len = 1 << align;
		}
		total_len += len;

		if (params->norandom) {
			src->off = 0;
			dst->off = 0;
		} else {
			src->off = dmatest_random() % (buf_size - len + 1);
			dst->off = dmatest_random() % (buf_size - len + 1);

			src->off = (src->off >> align) << align;
			dst->off = (dst->off >> align) << align;
		}

		if (!params->noverify) {
			start = ktime_get();
			dmatest_init_srcs(src->aligned, src->off, len,
					  buf_size, is_memset);
			dmatest_init_dsts(dst->aligned, dst->off, len,
					  buf_size, is_memset);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}

		um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src->off, dst->off, len, ret);
			continue;
		}

		um->len = buf_size;
		for (i = 0; i < src->cnt; i++) {
			void *buf = src->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src->off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				result("src mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		dsts = &um->addr[src->cnt];
		for (i = 0; i < dst->cnt; i++) {
			void *buf = dst->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				result("dst mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->bidi_cnt++;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst->off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev->device_prep_dma_memset(chan,
						dsts[0] + dst->off,
						*(src->aligned[0] + src->off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst->off,
						      srcs, src->cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			for (i = 0; i < dst->cnt; i++)
				dma_pq[i] = dsts[i] + dst->off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src->cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			result("prep error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}

		done->done = false;
		if (!params->polled) {
			tx->callback = dmatest_callback;
			tx->callback_param = done;
		}
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}

		if (params->polled) {
			status = dma_sync_wait(chan, cookie);
			dmaengine_terminate_sync(chan);
			if (status == DMA_COMPLETE)
				done->done = true;
		} else {
			dma_async_issue_pending(chan);

			wait_event_freezable_timeout(thread->done_wait,
					done->done,
					msecs_to_jiffies(params->timeout));

			status = dma_async_is_tx_complete(chan, cookie, NULL,
							  NULL);
		}

		if (!done->done) {
			result("test timed out", total_tests, src->off, dst->off,
			       len, 0);
			goto error_unmap_continue;
		} else if (status != DMA_COMPLETE &&
			   !(dma_has_cap(DMA_COMPLETION_NO_ORDER,
					 dev->cap_mask) &&
			     status == DMA_OUT_OF_ORDER)) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src->off,
			       dst->off, len, ret);
			goto error_unmap_continue;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
			continue;
		}

		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(src->aligned, 0, src->off,
				0, PATTERN_SRC, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off,
				src->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off + len,
				buf_size, src->off + len,
				PATTERN_SRC, true, is_memset);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(dst->aligned, 0, dst->off,
				0, PATTERN_DST, false, is_memset);
		error_count += dmatest_verify(dst->aligned, dst->off,
				dst->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, false, is_memset);
		error_count += dmatest_verify(dst->aligned, dst->off + len,
				buf_size, dst->off + len,
				PATTERN_DST, false, is_memset);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);

		if (error_count) {
			result("data error", total_tests, src->off, dst->off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
		}

		continue;

error_unmap_continue:
		dmaengine_unmap_put(um);
		failed_tests++;
	}
	ktime = ktime_sub(ktime_get(), ktime);
	ktime = ktime_sub(ktime, comparetime);
	ktime = ktime_sub(ktime, filltime);
	runtime = ktime_to_us(ktime);

	ret = 0;
	kfree(dma_pq);
err_srcs_array:
	kfree(srcs);
err_dst:
	dmatest_free_test_data(dst);
err_src:
	dmatest_free_test_data(src);
err_free_coefs:
	kfree(pq_coefs);
err_thread_type:
	iops = dmatest_persec(runtime, total_tests);
	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret || failed_tests)
		dmaengine_terminate_sync(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread *thread;
	struct dmatest_thread *_thread;
	int ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_sync(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		thread->test_done.wait = &thread->done_wait;
		init_waitqueue_head(&thread->done_wait);
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		thread->pending = true;
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) &&
	    info->params.polled) {
		info->params.polled = false;
		pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n");
	}

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		if (dmatest == 0) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Added %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	return dmatest_match_channel(param, chan) &&
	       dmatest_match_device(param, chan->device);
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void add_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;
	params->norandom = norandom;
	params->alignment = alignment;
	params->transfer_size = transfer_size;
	params->polled = polled;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void run_pending_tests(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;
	unsigned int thread_count = 0;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		thread_count = 0;
		list_for_each_entry(thread, &dtc->threads, node) {
			wake_up_process(thread->task);
			thread_count++;
		}
		pr_info("Started %u threads using %s\n",
			thread_count, dma_chan_name(dtc->chan));
	}
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void start_threaded_tests(struct dmatest_info *info)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	run_pending_tests(info);
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		if (!is_threaded_test_pending(info))
			stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	} else if (dmatest_run) {
		if (!is_threaded_test_pending(info)) {
			/*
			 * We have nothing to run. This can be due to:
			 */
			ret = info->last_error;
			if (ret) {
				/* 1) Misconfiguration */
				pr_err("Channel misconfigured, can't continue\n");
				mutex_unlock(&info->lock);
				return ret;
			} else {
				/* 2) We rely on defaults */
				pr_info("No channels configured, continue with any\n");
				if (!is_threaded_test_run(info))
					stop_threaded_test(info);
				add_threaded_test(info);
			}
		}
		start_threaded_tests(info);
	} else {
		stop_threaded_test(info);
	}

	mutex_unlock(&info->lock);

	return ret;
}

static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_chan *dtc;
	char chan_reset_val[20];
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_copystring(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}
	/* Clear any previously run threads */
	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info))
		stop_threaded_test(info);
	/* Reject channels that are already registered */
	if (is_threaded_test_pending(info)) {
		list_for_each_entry(dtc, &info->channels, node) {
			if (strcmp(dma_chan_name(dtc->chan),
				   strim(test_channel)) == 0) {
				dtc = list_last_entry(&info->channels,
						      struct dmatest_chan,
						      node);
				strlcpy(chan_reset_val,
					dma_chan_name(dtc->chan),
					sizeof(chan_reset_val));
				ret = -EBUSY;
				goto add_chan_err;
			}
		}
	}

	add_threaded_test(info);

	/* Check if channel was added successfully */
	if (!list_empty(&info->channels)) {
		/*
		 * If the new channel was not successfully added, revert the
		 * "test_channel" string to the name of the last successfully
		 * added channel, except when the user issues an empty string
		 * as the channel parameter.
		 */
		dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
		    && (strcmp("", strim(test_channel)) != 0)) {
			ret = -EINVAL;
			strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
				sizeof(chan_reset_val));
			goto add_chan_err;
		}

	} else {
		/* Clear test_channel if no channels were added successfully */
		strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
		ret = -EBUSY;
		goto add_chan_err;
	}

	info->last_error = ret;
	mutex_unlock(&info->lock);

	return ret;

add_chan_err:
	param_set_copystring(chan_reset_val, kp);
	info->last_error = ret;
	mutex_unlock(&info->lock);

	return ret;
}

static int dmatest_chan_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
		stop_threaded_test(info);
		strlcpy(test_channel, "", sizeof(test_channel));
	}
	mutex_unlock(&info->lock);

	return param_get_string(val, kp);
}

static int dmatest_test_list_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_chan *dtc;
	unsigned int thread_count = 0;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		thread_count = 0;
		list_for_each_entry(thread, &dtc->threads, node) {
			thread_count++;
		}
		pr_info("%u threads using %s\n",
			thread_count, dma_chan_name(dtc->chan));
	}

	return 0;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		add_threaded_test(info);
		run_pending_tests(info);
		mutex_unlock(&info->lock);
	}

	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");