/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@suse.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/seq_instr.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
MODULE_LICENSE("GPL");

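/*
 * Take the per-list ops lock: lists flagged SNDRV_SEQ_INSTR_FLG_DIRECT use a
 * sleeping mutex (assumed to be handled from process context only), all
 * other lists use an IRQ-safe spinlock.
 */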
static void snd_instr_lock_ops(struct snd_seq_kinstr_list *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_lock_irqsave(&list->ops_lock, list->ops_flags);
	} else {
		mutex_lock(&list->ops_mutex);
	}
}

static void snd_instr_unlock_ops(struct snd_seq_kinstr_list *list)
{
	if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
		spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
	} else {
		mutex_unlock(&list->ops_mutex);
	}
}

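/* allocate a new instrument record with add_len bytes of type-specific data */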
static struct snd_seq_kinstr *snd_seq_instr_new(int add_len, int atomic)
{
	struct snd_seq_kinstr *instr;

	instr = kzalloc(sizeof(struct snd_seq_kinstr) + add_len,
			atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (instr == NULL)
		return NULL;
	instr->add_len = add_len;
	return instr;
}

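/*
 * Free an instrument record.  The type-specific remove callback is called
 * first and may veto the removal by returning an error.
 */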
static int snd_seq_instr_free(struct snd_seq_kinstr *instr, int atomic)
{
	int result = 0;

	if (instr == NULL)
		return -EINVAL;
	if (instr->ops && instr->ops->remove)
		result = instr->ops->remove(instr->ops->private_data, instr, 1);
	if (!result)
		kfree(instr);
	return result;
}

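/* create an empty instrument list */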
struct snd_seq_kinstr_list *snd_seq_instr_list_new(void)
{
	struct snd_seq_kinstr_list *list;

	list = kzalloc(sizeof(struct snd_seq_kinstr_list), GFP_KERNEL);
	if (list == NULL)
		return NULL;
	spin_lock_init(&list->lock);
	spin_lock_init(&list->ops_lock);
	mutex_init(&list->ops_mutex);
	list->owner = -1;
	return list;
}

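/*
 * Free an instrument list together with all instruments and clusters it
 * still contains; busy instruments are waited for until their use count
 * drops to zero.
 */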
void snd_seq_instr_list_free(struct snd_seq_kinstr_list **list_ptr)
{
	struct snd_seq_kinstr_list *list;
	struct snd_seq_kinstr *instr;
	struct snd_seq_kcluster *cluster;
	int idx;
	unsigned long flags;

	if (list_ptr == NULL)
		return;
	list = *list_ptr;
	*list_ptr = NULL;
	if (list == NULL)
		return;

	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		while ((instr = list->hash[idx]) != NULL) {
			list->hash[idx] = instr->next;
			list->count--;
			spin_lock_irqsave(&list->lock, flags);
			while (instr->use) {
				spin_unlock_irqrestore(&list->lock, flags);
				schedule_timeout_interruptible(1);
				spin_lock_irqsave(&list->lock, flags);
			}
			spin_unlock_irqrestore(&list->lock, flags);
			if (snd_seq_instr_free(instr, 0) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
		}
		while ((cluster = list->chash[idx]) != NULL) {
			list->chash[idx] = cluster->next;
			list->ccount--;
			kfree(cluster);
		}
	}
	kfree(list);
}

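/*
 * Return 0 if the instrument matches the given free request (i.e. it should
 * be removed), 1 if it has to be kept.
 */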
static int instr_free_compare(struct snd_seq_kinstr *instr,
			      struct snd_seq_instr_header *ifree,
			      unsigned int client)
{
	switch (ifree->cmd) {
	case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
		/* all, except private for other clients */
		if ((instr->instr.std & 0xff000000) == 0)
			return 0;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 0;
		return 1;
	case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
		/* all my private instruments */
		if ((instr->instr.std & 0xff000000) == 0)
			return 1;
		if (((instr->instr.std >> 24) & 0xff) == client)
			return 0;
		return 1;
	case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
		/* instruments in the given cluster (public or my private ones) */
		if ((instr->instr.std & 0xff000000) == 0) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 0;
			return 1;
		}
		if (((instr->instr.std >> 24) & 0xff) == client) {
			if (instr->instr.cluster == ifree->id.cluster)
				return 0;
		}
		return 1;
	}
	return 1;
}

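/*
 * Remove and free all instruments that match the free request; used for the
 * ALL, PRIVATE and CLUSTER bulk free commands.
 */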
int snd_seq_instr_list_free_cond(struct snd_seq_kinstr_list *list,
				 struct snd_seq_instr_header *ifree,
				 int client,
				 int atomic)
{
	struct snd_seq_kinstr *instr, *prev, *next, *flist;
	int idx;
	unsigned long flags;

	snd_instr_lock_ops(list);
	for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[idx];
		prev = flist = NULL;
		while (instr) {
			while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
				prev = instr;
				instr = instr->next;
			}
			if (instr == NULL)
				continue;
			if (instr->ops && instr->ops->notify)
				instr->ops->notify(instr->ops->private_data, instr,
						   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
			next = instr->next;
			if (prev == NULL) {
				list->hash[idx] = next;
			} else {
				prev->next = next;
			}
			list->count--;
			instr->next = flist;
			flist = instr;
			instr = next;
		}
		spin_unlock_irqrestore(&list->lock, flags);
		while (flist) {
			instr = flist;
			flist = instr->next;
			while (instr->use)
				schedule_timeout_interruptible(1);
			if (snd_seq_instr_free(instr, atomic) < 0)
				snd_printk(KERN_WARNING "instrument free problem\n");
			instr = next;
		}
	}
	snd_instr_unlock_ops(list);
	return 0;
}

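/* fold the bank and program numbers into a hash table index */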
static int compute_hash_instr_key(struct snd_seq_instr *instr)
{
	int result;

	result = instr->bank | (instr->prg << 16);
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}

#if 0
static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
	int result;

	result = cluster;
	result += result >> 24;
	result += result >> 16;
	result += result >> 8;
	return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}
#endif

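/*
 * Return 0 if the two instrument identifiers match.  The exact comparison
 * requires identical cluster, bank and program numbers, the same owning
 * client and at least one common standard bit; the non-exact (lookup) form
 * treats a zero cluster in i2 as a wildcard.
 */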
static int compare_instr(struct snd_seq_instr *i1, struct snd_seq_instr *i2, int exact)
{
	if (exact) {
		if (i1->cluster != i2->cluster ||
		    i1->bank != i2->bank ||
		    i1->prg != i2->prg)
			return 1;
		if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
			return 1;
		if (!(i1->std & i2->std))
			return 1;
		return 0;
	} else {
		unsigned int client_check;

		if (i2->cluster && i1->cluster != i2->cluster)
			return 1;
		client_check = i2->std & 0xff000000;
		if (client_check) {
			if ((i1->std & 0xff000000) != client_check)
				return 1;
		} else {
			if ((i1->std & i2->std) != i2->std)
				return 1;
		}
		return i1->bank != i2->bank || i1->prg != i2->prg;
	}
}

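/*
 * Look up an instrument in the list.  On success the instrument's use count
 * is incremented and the caller must release it with snd_seq_instr_free_use().
 * When follow_alias is set, alias entries are resolved up to ten levels deep.
 */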
struct snd_seq_kinstr *snd_seq_instr_find(struct snd_seq_kinstr_list *list,
					  struct snd_seq_instr *instr,
					  int exact,
					  int follow_alias)
{
	unsigned long flags;
	int depth = 0;
	struct snd_seq_kinstr *result;

	if (list == NULL || instr == NULL)
		return NULL;
	spin_lock_irqsave(&list->lock, flags);
      __again:
	result = list->hash[compute_hash_instr_key(instr)];
	while (result) {
		if (!compare_instr(&result->instr, instr, exact)) {
			if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
				instr = (struct snd_seq_instr *)KINSTR_DATA(result);
				if (++depth > 10)
					goto __not_found;
				goto __again;
			}
			result->use++;
			spin_unlock_irqrestore(&list->lock, flags);
			return result;
		}
		result = result->next;
	}
      __not_found:
	spin_unlock_irqrestore(&list->lock, flags);
	return NULL;
}

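/* release a use reference obtained from snd_seq_instr_find() */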
void snd_seq_instr_free_use(struct snd_seq_kinstr_list *list,
			    struct snd_seq_kinstr *instr)
{
	unsigned long flags;

	if (list == NULL || instr == NULL)
		return;
	spin_lock_irqsave(&list->lock, flags);
	if (instr->use <= 0) {
		snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n",
			   instr->use, instr->name);
	} else {
		instr->use--;
	}
	spin_unlock_irqrestore(&list->lock, flags);
}

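/*
 * A minimal usage sketch of the find/release pair for lowlevel drivers
 * (the 'list' and 'id' variables are assumed to come from the caller and
 * are not part of this file):
 *
 *	struct snd_seq_kinstr *kinstr;
 *
 *	kinstr = snd_seq_instr_find(list, &id, 0, 1);
 *	if (kinstr) {
 *		... access the format-specific data via KINSTR_DATA(kinstr) ...
 *		snd_seq_instr_free_use(list, kinstr);
 *	}
 */

/* find the ops structure that handles the given instrument data format */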
static struct snd_seq_kinstr_ops *instr_ops(struct snd_seq_kinstr_ops *ops,
					    char *instr_type)
{
	while (ops) {
		if (!strcmp(ops->instr_type, instr_type))
			return ops;
		ops = ops->next;
	}
	return NULL;
}

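/* send a RESULT event with the given code back to the requesting client */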
static int instr_result(struct snd_seq_event *ev,
			int type, int result,
			int atomic)
{
	struct snd_seq_event sev;

	memset(&sev, 0, sizeof(sev));
	sev.type = SNDRV_SEQ_EVENT_RESULT;
	sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
		    SNDRV_SEQ_PRIORITY_NORMAL;
	sev.source = ev->dest;
	sev.dest = ev->source;
	sev.data.result.event = type;
	sev.data.result.result = result;
#if 0
	printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
	       type, result,
	       sev.queue,
	       sev.source.client, sev.source.port,
	       sev.dest.client, sev.dest.port);
#endif
	return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
}

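/*
 * INSTR_BEGIN and INSTR_END bracket an instrument transfer: a client claims
 * exclusive ownership of the list and releases it again when finished.
 */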
static int instr_begin(struct snd_seq_kinstr_ops *ops,
		       struct snd_seq_kinstr_list *list,
		       struct snd_seq_event *ev,
		       int atomic, int hop)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	if (list->owner >= 0 && list->owner != ev->source.client) {
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
	}
	list->owner = ev->source.client;
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
}

static int instr_end(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	unsigned long flags;

	/* TODO: timeout handling */
	spin_lock_irqsave(&list->lock, flags);
	if (list->owner == ev->source.client) {
		list->owner = -1;
		spin_unlock_irqrestore(&list->lock, flags);
		return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
	}
	spin_unlock_irqrestore(&list->lock, flags);
	return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
}

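/* the INFO, FINFO, RESET and STATUS queries are not implemented here */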
static int instr_info(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}

static int instr_format_info(struct snd_seq_kinstr_ops *ops,
			     struct snd_seq_kinstr_list *list,
			     struct snd_seq_event *ev,
			     int atomic, int hop)
{
	return -ENXIO;
}

static int instr_reset(struct snd_seq_kinstr_ops *ops,
		       struct snd_seq_kinstr_list *list,
		       struct snd_seq_event *ev,
		       int atomic, int hop)
{
	return -ENXIO;
}

static int instr_status(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int atomic, int hop)
{
	return -ENXIO;
}

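/*
 * Store a new instrument delivered in a VARUSR event.  The user-space
 * payload starts with a struct snd_seq_instr_header and is followed by
 * format-specific instrument data that is handed to the matching ops.
 */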
static int instr_put(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	unsigned long flags;
	struct snd_seq_instr_header put;
	struct snd_seq_kinstr *instr;
	int result = -EINVAL, len, key;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
		goto __return;
	if (copy_from_user(&put, (void __user *)ev->data.ext.ptr,
			   sizeof(struct snd_seq_instr_header))) {
		result = -EFAULT;
		goto __return;
	}
	snd_instr_lock_ops(list);
	if (put.id.instr.std & 0xff000000) {	/* private instrument */
		put.id.instr.std &= 0x00ffffff;
		put.id.instr.std |= (unsigned int)ev->source.client << 24;
	}
	if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
		snd_seq_instr_free_use(list, instr);
		snd_instr_unlock_ops(list);
		result = -EBUSY;
		goto __return;
	}
	ops = instr_ops(ops, put.data.data.format);
	if (ops == NULL) {
		snd_instr_unlock_ops(list);
		goto __return;
	}
	len = ops->add_len;
	if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
		len = sizeof(struct snd_seq_instr);
	instr = snd_seq_instr_new(len, atomic);
	if (instr == NULL) {
		snd_instr_unlock_ops(list);
		result = -ENOMEM;
		goto __return;
	}
	instr->ops = ops;
	instr->instr = put.id.instr;
	strlcpy(instr->name, put.data.name, sizeof(instr->name));
	instr->type = put.data.type;
	if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
		result = ops->put(ops->private_data,
				  instr,
				  (void __user *)ev->data.ext.ptr + sizeof(struct snd_seq_instr_header),
				  ev->data.ext.len - sizeof(struct snd_seq_instr_header),
				  atomic,
				  put.cmd);
		if (result < 0) {
			snd_seq_instr_free(instr, atomic);
			snd_instr_unlock_ops(list);
			goto __return;
		}
	}
	key = compute_hash_instr_key(&instr->instr);
	spin_lock_irqsave(&list->lock, flags);
	instr->next = list->hash[key];
	list->hash[key] = instr;
	list->count++;
	spin_unlock_irqrestore(&list->lock, flags);
	snd_instr_unlock_ops(list);
	result = 0;
      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
	return result;
}

static int instr_get(struct snd_seq_kinstr_ops *ops,
		     struct snd_seq_kinstr_list *list,
		     struct snd_seq_event *ev,
		     int atomic, int hop)
{
	return -ENXIO;
}

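/*
 * Free one instrument or a group of instruments.  The cmd field of the
 * header selects between SINGLE and the ALL/PRIVATE/CLUSTER bulk forms.
 */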
static int instr_free(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	struct snd_seq_instr_header ifree;
	struct snd_seq_kinstr *instr, *prev;
	int result = -EINVAL;
	unsigned long flags;
	unsigned int hash;

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
		goto __return;

	if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
		goto __return;
	if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr,
			   sizeof(struct snd_seq_instr_header))) {
		result = -EFAULT;
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
	    ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
		result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
		goto __return;
	}
	if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
		if (ifree.id.instr.std & 0xff000000) {
			ifree.id.instr.std &= 0x00ffffff;
			ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
		}
		hash = compute_hash_instr_key(&ifree.id.instr);
		snd_instr_lock_ops(list);
		spin_lock_irqsave(&list->lock, flags);
		instr = list->hash[hash];
		prev = NULL;
		while (instr) {
			if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
				goto __free_single;
			prev = instr;
			instr = instr->next;
		}
		result = -ENOENT;
		spin_unlock_irqrestore(&list->lock, flags);
		snd_instr_unlock_ops(list);
		goto __return;

	      __free_single:
		if (prev) {
			prev->next = instr->next;
		} else {
			list->hash[hash] = instr->next;
		}
		if (instr->ops && instr->ops->notify)
			instr->ops->notify(instr->ops->private_data, instr,
					   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
		while (instr->use) {
			spin_unlock_irqrestore(&list->lock, flags);
			schedule_timeout_interruptible(1);
			spin_lock_irqsave(&list->lock, flags);
		}
		spin_unlock_irqrestore(&list->lock, flags);
		result = snd_seq_instr_free(instr, atomic);
		snd_instr_unlock_ops(list);
		goto __return;
	}

      __return:
	instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
	return result;
}

static int instr_list(struct snd_seq_kinstr_ops *ops,
		      struct snd_seq_kinstr_list *list,
		      struct snd_seq_event *ev,
		      int atomic, int hop)
{
	return -ENXIO;
}

static int instr_cluster(struct snd_seq_kinstr_ops *ops,
			 struct snd_seq_kinstr_list *list,
			 struct snd_seq_event *ev,
			 int atomic, int hop)
{
	return -ENXIO;
}

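/*
 * Main dispatcher: route an instrument event to the matching handler above.
 * BEGIN/END are accepted only as direct events, and lists flagged
 * SNDRV_SEQ_INSTR_FLG_DIRECT refuse queued (non-direct) events entirely.
 */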
int snd_seq_instr_event(struct snd_seq_kinstr_ops *ops,
			struct snd_seq_kinstr_list *list,
			struct snd_seq_event *ev,
			int client,
			int atomic,
			int hop)
{
	int direct = 0;

	snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
	if (snd_seq_ev_is_direct(ev)) {
		direct = 1;
		switch (ev->type) {
		case SNDRV_SEQ_EVENT_INSTR_BEGIN:
			return instr_begin(ops, list, ev, atomic, hop);
		case SNDRV_SEQ_EVENT_INSTR_END:
			return instr_end(ops, list, ev, atomic, hop);
		}
	}
	if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
		return -EINVAL;
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_INSTR_INFO:
		return instr_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FINFO:
		return instr_format_info(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_RESET:
		return instr_reset(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_STATUS:
		return instr_status(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_PUT:
		return instr_put(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_GET:
		return instr_get(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_FREE:
		return instr_free(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_LIST:
		return instr_list(ops, list, ev, atomic, hop);
	case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
		return instr_cluster(ops, list, ev, atomic, hop);
	}
	return -EINVAL;
}

/*
 *  Init part
 */

static int __init alsa_seq_instr_init(void)
{
	return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)

EXPORT_SYMBOL(snd_seq_instr_list_new);
EXPORT_SYMBOL(snd_seq_instr_list_free);
EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
EXPORT_SYMBOL(snd_seq_instr_find);
EXPORT_SYMBOL(snd_seq_instr_free_use);
EXPORT_SYMBOL(snd_seq_instr_event);