// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rio_cm - RapidIO Channelized Messaging Driver
 *
 * Copyright 2013-2016 Integrated Device Technology, Inc.
 * Copyright (c) 2015, Prodrive Technologies
 * Copyright (c) 2015, RapidIO Trade Association
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/rio_cm_cdev.h>

#define DRV_NAME        "rio_cm"
#define DRV_VERSION     "1.0.0"
#define DRV_AUTHOR      "Alexandre Bounine <alexandre.bounine@idt.com>"
#define DRV_DESC        "RapidIO Channelized Messaging Driver"
#define DEV_NAME        "rio_cm"

/* Debug output filtering masks */
enum {
        DBG_NONE        = 0,
        DBG_INIT        = BIT(0), /* driver init */
        DBG_EXIT        = BIT(1), /* driver exit */
        DBG_MPORT       = BIT(2), /* mport add/remove */
        DBG_RDEV        = BIT(3), /* RapidIO device add/remove */
        DBG_CHOP        = BIT(4), /* channel operations */
        DBG_WAIT        = BIT(5), /* waiting for events */
        DBG_TX          = BIT(6), /* message TX */
        DBG_TX_EVENT    = BIT(7), /* message TX event */
        DBG_RX_DATA     = BIT(8), /* inbound data messages */
        DBG_RX_CMD      = BIT(9), /* inbound REQ/ACK/NACK messages */
        DBG_ALL         = ~0,
};

#ifdef DEBUG
#define riocm_debug(level, fmt, arg...) \
        do { \
                if (DBG_##level & dbg_level) \
                        pr_debug(DRV_NAME ": %s " fmt "\n", \
                                 __func__, ##arg); \
        } while (0)
#else
#define riocm_debug(level, fmt, arg...) \
                no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
#endif

#define riocm_warn(fmt, arg...) \
        pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)

#define riocm_error(fmt, arg...) \
        pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)

static int cmbox = 1;
module_param(cmbox, int, S_IRUGO);
MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");

static int chstart = 256;
module_param(chstart, int, S_IRUGO);
MODULE_PARM_DESC(chstart,
                 "Start channel number for dynamic allocation (default 256)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define RIOCM_TX_RING_SIZE      128
#define RIOCM_RX_RING_SIZE      128
#define RIOCM_CONNECT_TO        3 /* connect response timeout (in sec) */

#define RIOCM_MAX_CHNUM         0xffff /* Use full range of u16 field */
#define RIOCM_CHNUM_AUTO        0
#define RIOCM_MAX_EP_COUNT      0x10000 /* Max number of endpoints */
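
/*
 * Note: both ring sizes above must stay powers of two. The TX path
 * advances its slot indices with a mask instead of a modulo, e.g. in
 * riocm_post_send() and rio_txcq_handler() below:
 *
 *      cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
 *
 * which wraps correctly only for power-of-two sizes.
 */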

enum rio_cm_state {
        RIO_CM_IDLE,
        RIO_CM_CONNECT,
        RIO_CM_CONNECTED,
        RIO_CM_DISCONNECT,
        RIO_CM_CHAN_BOUND,
        RIO_CM_LISTEN,
        RIO_CM_DESTROYING,
};

enum rio_cm_pkt_type {
        RIO_CM_SYS      = 0xaa,
        RIO_CM_CHAN     = 0x55,
};

enum rio_cm_chop {
        CM_CONN_REQ,
        CM_CONN_ACK,
        CM_CONN_CLOSE,
        CM_DATA_MSG,
};

struct rio_ch_base_bhdr {
        u32 src_id;
        u32 dst_id;
#define RIO_HDR_LETTER_MASK 0xffff0000
#define RIO_HDR_MBOX_MASK   0x0000ffff
        u8  src_mbox;
        u8  dst_mbox;
        u8  type;
} __attribute__((__packed__));

struct rio_ch_chan_hdr {
        struct rio_ch_base_bhdr bhdr;
        u8  ch_op;
        u16 dst_ch;
        u16 src_ch;
        u16 msg_len;
        u16 rsrvd;
} __attribute__((__packed__));
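
/*
 * Both headers above are packed wire structures: every multi-byte field
 * travels big-endian, so senders in this file fill them with
 * htonl()/htons() and handlers read them back with ntohl()/ntohs().
 * A minimal sketch of building a data-message header, with illustrative
 * variable names (this mirrors riocm_ch_send() below):
 *
 *      struct rio_ch_chan_hdr *hdr = buf;
 *
 *      hdr->bhdr.src_id = htonl(local_destid);
 *      hdr->bhdr.dst_id = htonl(remote_destid);
 *      hdr->bhdr.type = RIO_CM_CHAN;
 *      hdr->ch_op = CM_DATA_MSG;
 *      hdr->dst_ch = htons(remote_channel);
 *      hdr->src_ch = htons(local_channel);
 *      hdr->msg_len = htons(len);
 */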

struct tx_req {
        struct list_head node;
        struct rio_dev  *rdev;
        void            *buffer;
        size_t          len;
};

struct cm_dev {
        struct list_head        list;
        struct rio_mport        *mport;
        void                    *rx_buf[RIOCM_RX_RING_SIZE];
        int                     rx_slots;
        struct mutex            rx_lock;

        void                    *tx_buf[RIOCM_TX_RING_SIZE];
        int                     tx_slot;
        int                     tx_cnt;
        int                     tx_ack_slot;
        struct list_head        tx_reqs;
        spinlock_t              tx_lock;

        struct list_head        peers;
        u32                     npeers;
        struct workqueue_struct *rx_wq;
        struct work_struct      rx_work;
};

struct chan_rx_ring {
        void    *buf[RIOCM_RX_RING_SIZE];
        int     head;
        int     tail;
        int     count;

        /* Tracking RX buffers reported to upper level */
        void    *inuse[RIOCM_RX_RING_SIZE];
        int     inuse_cnt;
};

struct rio_channel {
        u16                     id;     /* local channel ID */
        struct kref             ref;    /* channel refcount */
        struct file             *filp;
        struct cm_dev           *cmdev; /* associated CM device object */
        struct rio_dev          *rdev;  /* remote RapidIO device */
        enum rio_cm_state       state;
        int                     error;
        spinlock_t              lock;
        void                    *context;
        u32                     loc_destid;     /* local destID */
        u32                     rem_destid;     /* remote destID */
        u16                     rem_channel;    /* remote channel ID */
        struct list_head        accept_queue;
        struct list_head        ch_node;
        struct completion       comp;
        struct completion       comp_close;
        struct chan_rx_ring     rx_ring;
};

struct cm_peer {
        struct list_head node;
        struct rio_dev *rdev;
};

struct rio_cm_work {
        struct work_struct work;
        struct cm_dev *cm;
        void *data;
};

struct conn_req {
        struct list_head node;
        u32 destid;     /* requester destID */
        u16 chan;       /* requester channel ID */
        struct cm_dev *cmdev;
};

/*
 * A channel_dev structure represents a CM_CDEV
 * @cdev        Character device
 * @dev         Associated device object
 */
struct channel_dev {
        struct cdev     cdev;
        struct device   *dev;
};

static struct rio_channel *riocm_ch_alloc(u16 ch_num);
static void riocm_ch_free(struct kref *ref);
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
                           void *buffer, size_t len);
static int riocm_ch_close(struct rio_channel *ch);

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(ch_idr);

static LIST_HEAD(cm_dev_list);
static DECLARE_RWSEM(rdev_sem);

static struct class *dev_class;
static unsigned int dev_major;
static unsigned int dev_minor_base;
static dev_t dev_number;
static struct channel_dev riocm_cdev;

#define is_msg_capable(src_ops, dst_ops)                \
                        ((src_ops & RIO_SRC_OPS_DATA_MSG) &&    \
                         (dst_ops & RIO_DST_OPS_DATA_MSG))
#define dev_cm_capable(dev) \
        is_msg_capable(dev->src_ops, dev->dst_ops)

static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
{
        int ret;

        spin_lock_bh(&ch->lock);
        ret = (ch->state == cmp);
        spin_unlock_bh(&ch->lock);
        return ret;
}

static int riocm_cmp_exch(struct rio_channel *ch,
                          enum rio_cm_state cmp, enum rio_cm_state exch)
{
        int ret;

        spin_lock_bh(&ch->lock);
        ret = (ch->state == cmp);
        if (ret)
                ch->state = exch;
        spin_unlock_bh(&ch->lock);
        return ret;
}

static enum rio_cm_state riocm_exch(struct rio_channel *ch,
                                    enum rio_cm_state exch)
{
        enum rio_cm_state old;

        spin_lock_bh(&ch->lock);
        old = ch->state;
        ch->state = exch;
        spin_unlock_bh(&ch->lock);
        return old;
}
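
/*
 * The three helpers above serialize channel state transitions under
 * ch->lock: riocm_cmp() tests the state, riocm_cmp_exch() is a
 * compare-and-swap on it, and riocm_exch() swaps unconditionally and
 * returns the previous state. The typical patterns used below:
 *
 *      if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT))
 *              return -EINVAL; // lost the race or wrong state
 *
 *      if (riocm_exch(ch, RIO_CM_DESTROYING) == RIO_CM_CONNECTED)
 *              riocm_send_close(ch);   // notify only a live peer
 */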

static struct rio_channel *riocm_get_channel(u16 nr)
{
        struct rio_channel *ch;

        spin_lock_bh(&idr_lock);
        ch = idr_find(&ch_idr, nr);
        if (ch)
                kref_get(&ch->ref);
        spin_unlock_bh(&idr_lock);
        return ch;
}

static void riocm_put_channel(struct rio_channel *ch)
{
        kref_put(&ch->ref, riocm_ch_free);
}

static void *riocm_rx_get_msg(struct cm_dev *cm)
{
        void *msg;
        int i;

        msg = rio_get_inb_message(cm->mport, cmbox);
        if (msg) {
                for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                        if (cm->rx_buf[i] == msg) {
                                cm->rx_buf[i] = NULL;
                                cm->rx_slots++;
                                break;
                        }
                }

                if (i == RIOCM_RX_RING_SIZE)
                        riocm_warn("no record for buffer 0x%p", msg);
        }

        return msg;
}

/*
 * riocm_rx_fill - fills a ring of receive buffers for given cm device
 * @cm: cm_dev object
 * @nent: max number of entries to fill
 *
 * Returns: none
 */
static void riocm_rx_fill(struct cm_dev *cm, int nent)
{
        int i;

        if (cm->rx_slots == 0)
                return;

        for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
                if (cm->rx_buf[i] == NULL) {
                        cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
                        if (cm->rx_buf[i] == NULL)
                                break;
                        rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
                        cm->rx_slots--;
                        nent--;
                }
        }
}

/*
 * riocm_rx_free - frees all receive buffers associated with given cm device
 * @cm: cm_dev object
 *
 * Returns: none
 */
static void riocm_rx_free(struct cm_dev *cm)
{
        int i;

        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (cm->rx_buf[i] != NULL) {
                        kfree(cm->rx_buf[i]);
                        cm->rx_buf[i] = NULL;
                }
        }
}

/*
 * riocm_req_handler - connection request handler
 * @cm: cm_dev object
 * @req_data: pointer to the request packet
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -ENOMEM if unable to allocate memory to store the request
 */
static int riocm_req_handler(struct cm_dev *cm, void *req_data)
{
        struct rio_channel *ch;
        struct conn_req *req;
        struct rio_ch_chan_hdr *hh = req_data;
        u16 chnum;

        chnum = ntohs(hh->dst_ch);

        ch = riocm_get_channel(chnum);
        if (!ch)
                return -ENODEV;

        if (ch->state != RIO_CM_LISTEN) {
                riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
                riocm_put_channel(ch);
                return -EINVAL;
        }

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req) {
                riocm_put_channel(ch);
                return -ENOMEM;
        }

        req->destid = ntohl(hh->bhdr.src_id);
        req->chan = ntohs(hh->src_ch);
        req->cmdev = cm;

        spin_lock_bh(&ch->lock);
        list_add_tail(&req->node, &ch->accept_queue);
        spin_unlock_bh(&ch->lock);
        complete(&ch->comp);
        riocm_put_channel(ch);

        return 0;
}

/*
 * riocm_resp_handler - response to connection request handler
 * @resp_data: pointer to the response packet
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID.
 */
static int riocm_resp_handler(void *resp_data)
{
        struct rio_channel *ch;
        struct rio_ch_chan_hdr *hh = resp_data;
        u16 chnum;

        chnum = ntohs(hh->dst_ch);
        ch = riocm_get_channel(chnum);
        if (!ch)
                return -ENODEV;

        if (ch->state != RIO_CM_CONNECT) {
                riocm_put_channel(ch);
                return -EINVAL;
        }

        riocm_exch(ch, RIO_CM_CONNECTED);
        ch->rem_channel = ntohs(hh->src_ch);
        complete(&ch->comp);
        riocm_put_channel(ch);

        return 0;
}

/*
 * riocm_close_handler - channel close request handler
 * @data: pointer to the request packet
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          + error codes returned by riocm_ch_close.
 */
static int riocm_close_handler(void *data)
{
        struct rio_channel *ch;
        struct rio_ch_chan_hdr *hh = data;
        int ret;

        riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));

        spin_lock_bh(&idr_lock);
        ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
        if (!ch) {
                spin_unlock_bh(&idr_lock);
                return -ENODEV;
        }
        idr_remove(&ch_idr, ch->id);
        spin_unlock_bh(&idr_lock);

        riocm_exch(ch, RIO_CM_DISCONNECT);

        ret = riocm_ch_close(ch);
        if (ret)
                riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);

        return 0;
}

/*
 * rio_cm_handler - function that services request (non-data) packets
 * @cm: cm_dev object
 * @data: pointer to the packet
 */
static void rio_cm_handler(struct cm_dev *cm, void *data)
{
        struct rio_ch_chan_hdr *hdr;

        if (!rio_mport_is_running(cm->mport))
                goto out;

        hdr = data;

        riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
                    hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));

        switch (hdr->ch_op) {
        case CM_CONN_REQ:
                riocm_req_handler(cm, data);
                break;
        case CM_CONN_ACK:
                riocm_resp_handler(data);
                break;
        case CM_CONN_CLOSE:
                riocm_close_handler(data);
                break;
        default:
                riocm_error("Invalid packet header");
                break;
        }
out:
        kfree(data);
}

/*
 * rio_rx_data_handler - received data packet handler
 * @cm: cm_dev object
 * @buf: data packet
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EIO if channel is not in CONNECTED state,
 *          -ENOMEM if channel RX queue is full (packet discarded)
 */
static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
{
        struct rio_ch_chan_hdr *hdr;
        struct rio_channel *ch;

        hdr = buf;

        riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));

        ch = riocm_get_channel(ntohs(hdr->dst_ch));
        if (!ch) {
                /* Discard data message for non-existing channel */
                kfree(buf);
                return -ENODEV;
        }

        /* Place pointer to the buffer into channel's RX queue */
        spin_lock(&ch->lock);

        if (ch->state != RIO_CM_CONNECTED) {
                /* Channel is not ready to receive data, discard a packet */
                riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
                            ch->id, ch->state);
                spin_unlock(&ch->lock);
                kfree(buf);
                riocm_put_channel(ch);
                return -EIO;
        }

        if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
                /* If RX ring is full, discard a packet */
                riocm_debug(RX_DATA, "ch=%d is full", ch->id);
                spin_unlock(&ch->lock);
                kfree(buf);
                riocm_put_channel(ch);
                return -ENOMEM;
        }

        ch->rx_ring.buf[ch->rx_ring.head] = buf;
        ch->rx_ring.head++;
        ch->rx_ring.count++;
        ch->rx_ring.head %= RIOCM_RX_RING_SIZE;

        complete(&ch->comp);

        spin_unlock(&ch->lock);
        riocm_put_channel(ch);

        return 0;
}
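
/*
 * RX ring bookkeeping used above and in riocm_ch_receive(): buf[] is a
 * circular buffer where head is the producer index, tail the consumer
 * index and count the number of queued messages, so "full" is detected
 * as count == RIOCM_RX_RING_SIZE rather than by comparing head against
 * tail. Under ch->lock the producer side reduces to:
 *
 *      ring->buf[ring->head] = buf;
 *      ring->head = (ring->head + 1) % RIOCM_RX_RING_SIZE;
 *      ring->count++;
 *      complete(&ch->comp);    // wake a reader blocked in receive
 */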

/*
 * rio_ibmsg_handler - inbound message packet handler
 */
static void rio_ibmsg_handler(struct work_struct *work)
{
        struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
        void *data;
        struct rio_ch_chan_hdr *hdr;

        if (!rio_mport_is_running(cm->mport))
                return;

        while (1) {
                mutex_lock(&cm->rx_lock);
                data = riocm_rx_get_msg(cm);
                if (data)
                        riocm_rx_fill(cm, 1);
                mutex_unlock(&cm->rx_lock);

                if (data == NULL)
                        break;

                hdr = data;

                if (hdr->bhdr.type != RIO_CM_CHAN) {
                        /* For now simply discard packets other than channel */
                        riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
                                    hdr->bhdr.type);
                        kfree(data);
                        continue;
                }

                /* Process a channel message */
                if (hdr->ch_op == CM_DATA_MSG)
                        rio_rx_data_handler(cm, data);
                else
                        rio_cm_handler(cm, data);
        }
}

static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
                                int mbox, int slot)
{
        struct cm_dev *cm = dev_id;

        if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
                queue_work(cm->rx_wq, &cm->rx_work);
}

/*
 * rio_txcq_handler - TX completion handler
 * @cm: cm_dev object
 * @slot: TX queue slot
 *
 * TX completion handler also ensures that pending request packets are placed
 * into transmit queue as soon as a free slot becomes available. This is done
 * to give higher priority to request packets during high intensity data flow.
 */
static void rio_txcq_handler(struct cm_dev *cm, int slot)
{
        int ack_slot;

        /* ATTN: Add TX completion notification if/when direct buffer
         * transfer is implemented. At this moment only correct tracking
         * of tx_count is important.
         */
        riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
                    cm->mport->id, slot, cm->tx_cnt);

        spin_lock(&cm->tx_lock);
        ack_slot = cm->tx_ack_slot;

        if (ack_slot == slot)
                riocm_debug(TX_EVENT, "slot == ack_slot");

        while (cm->tx_cnt && ((ack_slot != slot) ||
               (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {

                cm->tx_buf[ack_slot] = NULL;
                ++ack_slot;
                ack_slot &= (RIOCM_TX_RING_SIZE - 1);
                cm->tx_cnt--;
        }

        if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
                riocm_error("tx_cnt %d out of sync", cm->tx_cnt);

        WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));

        cm->tx_ack_slot = ack_slot;

        /*
         * If there are pending requests, insert them into transmit queue
         */
        if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
                struct tx_req *req, *_req;
                int rc;

                list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
                        list_del(&req->node);
                        cm->tx_buf[cm->tx_slot] = req->buffer;
                        rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
                                                  req->buffer, req->len);
                        kfree(req->buffer);
                        kfree(req);

                        ++cm->tx_cnt;
                        ++cm->tx_slot;
                        cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
                        if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
                                break;
                }
        }

        spin_unlock(&cm->tx_lock);
}
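
/*
 * About the reclamation loop in rio_txcq_handler() above: "slot" is the
 * ring position reported by the mport driver for the latest completed
 * transmission, so every buffer from tx_ack_slot up to that position is
 * finished and its tx_buf[] entry can be dropped. The extra
 * (tx_cnt == RIOCM_TX_RING_SIZE) condition lets the walk start even
 * when ack_slot == slot, because a completely full ring makes the two
 * indices equal just as an empty one does.
 */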

static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
                                 int mbox, int slot)
{
        struct cm_dev *cm = dev_id;

        if (cm && rio_mport_is_running(cm->mport))
                rio_txcq_handler(cm, slot);
}

static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
                           void *buffer, size_t len)
{
        unsigned long flags;
        struct tx_req *treq;

        treq = kzalloc(sizeof(*treq), GFP_KERNEL);
        if (treq == NULL)
                return -ENOMEM;

        treq->rdev = rdev;
        treq->buffer = buffer;
        treq->len = len;

        spin_lock_irqsave(&cm->tx_lock, flags);
        list_add_tail(&treq->node, &cm->tx_reqs);
        spin_unlock_irqrestore(&cm->tx_lock, flags);
        return 0;
}

/*
 * riocm_post_send - helper function that places packet into msg TX queue
 * @cm: cm_dev object
 * @rdev: target RapidIO device object (required by outbound msg interface)
 * @buffer: pointer to a packet buffer to send
 * @len: length of data to transfer
 *
 * Returns: 0 if success, or error code otherwise.
 */
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
                           void *buffer, size_t len)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cm->tx_lock, flags);

        if (cm->mport == NULL) {
                rc = -ENODEV;
                goto err_out;
        }

        if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
                riocm_debug(TX, "Tx Queue is full");
                rc = -EBUSY;
                goto err_out;
        }

        cm->tx_buf[cm->tx_slot] = buffer;
        rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);

        riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
                    buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);

        ++cm->tx_cnt;
        ++cm->tx_slot;
        cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
err_out:
        spin_unlock_irqrestore(&cm->tx_lock, flags);
        return rc;
}

/*
 * riocm_ch_send - sends a data packet to a remote device
 * @ch_id: local channel ID
 * @buf: pointer to a data buffer to send (including CM header)
 * @len: length of data to transfer (including CM header)
 *
 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET
 *
 * Returns: 0 if success, or
 *          -EINVAL if one or more input parameters is/are not valid,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          + error codes returned by HW send routine.
 */
static int riocm_ch_send(u16 ch_id, void *buf, int len)
{
        struct rio_channel *ch;
        struct rio_ch_chan_hdr *hdr;
        int ret;

        if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
                return -EINVAL;

        ch = riocm_get_channel(ch_id);
        if (!ch) {
                riocm_error("%s(%d) ch_%d not found", current->comm,
                            task_pid_nr(current), ch_id);
                return -ENODEV;
        }

        if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
                ret = -EAGAIN;
                goto err_out;
        }

        /*
         * Fill buffer header section with corresponding channel data
         */
        hdr = buf;

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(ch->rem_destid);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_DATA_MSG;
        hdr->dst_ch = htons(ch->rem_channel);
        hdr->src_ch = htons(ch->id);
        hdr->msg_len = htons((u16)len);

        /* ATTN: the function call below relies on the fact that underlying
         * HW-specific add_outb_message() routine copies TX data into its own
         * internal transfer buffer (true for all RIONET compatible mport
         * drivers). Must be reviewed if mport driver uses the buffer directly.
         */
        ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
        if (ret)
                riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
err_out:
        riocm_put_channel(ch);
        return ret;
}
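
/*
 * Buffer layout expected by riocm_ch_send(): the CM header is a
 * reserved prefix of the caller's buffer, not a separate allocation.
 * A minimal sketch of a conforming in-kernel caller (illustrative
 * names, not part of the driver):
 *
 *      size_t len = sizeof(struct rio_ch_chan_hdr) + payload_len;
 *      void *buf = kmalloc(len, GFP_KERNEL);
 *
 *      memcpy(buf + sizeof(struct rio_ch_chan_hdr), payload, payload_len);
 *      ret = riocm_ch_send(ch_id, buf, len);
 *
 * cm_chan_msg_send() below works the same way: the buffer copied from
 * user space already starts with header space that this function
 * overwrites.
 */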

static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
{
        int i, ret = -EINVAL;

        spin_lock_bh(&ch->lock);

        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (ch->rx_ring.inuse[i] == buf) {
                        ch->rx_ring.inuse[i] = NULL;
                        ch->rx_ring.inuse_cnt--;
                        ret = 0;
                        break;
                }
        }

        spin_unlock_bh(&ch->lock);

        if (!ret)
                kfree(buf);

        return ret;
}

/*
 * riocm_ch_receive - fetch a data packet received for the specified channel
 * @ch: local channel object
 * @buf: pointer to a packet buffer
 * @timeout: timeout to wait for incoming packet (in jiffies)
 *
 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          -ENOMEM if in-use tracking queue is full,
 *          -ETIME if wait timeout expired,
 *          -EINTR if wait was interrupted.
 */
static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
{
        void *rxmsg = NULL;
        int i, ret = 0;
        long wret;

        if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
                ret = -EAGAIN;
                goto out;
        }

        if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
                /* If we do not have entries to track buffers given to upper
                 * layer, reject request.
                 */
                ret = -ENOMEM;
                goto out;
        }

        wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);

        riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);

        if (!wret)
                ret = -ETIME;
        else if (wret == -ERESTARTSYS)
                ret = -EINTR;
        else
                ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;

        if (ret)
                goto out;

        spin_lock_bh(&ch->lock);

        rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
        ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
        ch->rx_ring.count--;
        ch->rx_ring.tail++;
        ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
        ret = -ENOMEM;

        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (ch->rx_ring.inuse[i] == NULL) {
                        ch->rx_ring.inuse[i] = rxmsg;
                        ch->rx_ring.inuse_cnt++;
                        ret = 0;
                        break;
                }
        }

        if (ret) {
                /* We have no entry to store pending message: drop it */
                kfree(rxmsg);
                rxmsg = NULL;
        }

        spin_unlock_bh(&ch->lock);
out:
        *buf = rxmsg;
        return ret;
}

/*
 * riocm_ch_connect - sends a connect request to a remote device
 * @loc_ch: local channel ID
 * @cm: CM device to send connect request
 * @peer: target RapidIO device
 * @rem_ch: remote channel ID
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EINVAL if the channel is not in IDLE state,
 *          -ENOMEM if unable to allocate a request packet,
 *          -ETIME if ACK response timeout expired,
 *          -EINTR if wait for response was interrupted.
 */
static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
                            struct cm_peer *peer, u16 rem_ch)
{
        struct rio_channel *ch = NULL;
        struct rio_ch_chan_hdr *hdr;
        int ret;
        long wret;

        ch = riocm_get_channel(loc_ch);
        if (!ch)
                return -ENODEV;

        if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
                ret = -EINVAL;
                goto conn_done;
        }

        ch->cmdev = cm;
        ch->rdev = peer->rdev;
        ch->context = NULL;
        ch->loc_destid = cm->mport->host_deviceid;
        ch->rem_channel = rem_ch;

        /*
         * Send connect request to the remote RapidIO device
         */
        hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
        if (hdr == NULL) {
                ret = -ENOMEM;
                goto conn_done;
        }

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(peer->rdev->destid);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_CONN_REQ;
        hdr->dst_ch = htons(rem_ch);
        hdr->src_ch = htons(loc_ch);

        /* ATTN: the function call below relies on the fact that underlying
         * HW-specific add_outb_message() routine copies TX data into its
         * internal transfer buffer. Must be reviewed if mport driver uses
         * this buffer directly.
         */
        ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));

        if (ret != -EBUSY) {
                kfree(hdr);
        } else {
                ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
                if (ret)
                        kfree(hdr);
        }

        if (ret) {
                riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
                goto conn_done;
        }

        /* Wait for connect response from the remote device */
        wret = wait_for_completion_interruptible_timeout(&ch->comp,
                                                         RIOCM_CONNECT_TO * HZ);
        riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

        if (!wret)
                ret = -ETIME;
        else if (wret == -ERESTARTSYS)
                ret = -EINTR;
        else
                ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;

conn_done:
        riocm_put_channel(ch);
        return ret;
}

static int riocm_send_ack(struct rio_channel *ch)
{
        struct rio_ch_chan_hdr *hdr;
        int ret;

        hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
        if (hdr == NULL)
                return -ENOMEM;

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(ch->rem_destid);
        hdr->dst_ch = htons(ch->rem_channel);
        hdr->src_ch = htons(ch->id);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_CONN_ACK;

        /* ATTN: the function call below relies on the fact that underlying
         * add_outb_message() routine copies TX data into its internal transfer
         * buffer. Review if switching to direct buffer version.
         */
        ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

        if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
                                              ch->rdev, hdr, sizeof(*hdr)))
                return 0;
        kfree(hdr);

        if (ret)
                riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
                            ch->id, rio_name(ch->rdev), ret);
        return ret;
}

/*
 * riocm_ch_accept - accept incoming connection request
 * @ch_id: channel ID
 * @new_ch_id: pointer used to return the ID of the new channel
 * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection
 *           request is not available).
 *
 * Returns: pointer to new channel struct if success, or error-valued pointer:
 *          -ENODEV - cannot find specified channel or mport,
 *          -EINVAL - the channel is not in LISTEN state,
 *          -EAGAIN - no connection request available immediately (timeout=0),
 *          -ENOMEM - unable to allocate new channel,
 *          -ETIME - wait timeout expired,
 *          -EINTR - wait was interrupted.
 */
static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
                                           long timeout)
{
        struct rio_channel *ch;
        struct rio_channel *new_ch;
        struct conn_req *req;
        struct cm_peer *peer;
        int found = 0;
        int err = 0;
        long wret;

        ch = riocm_get_channel(ch_id);
        if (!ch)
                return ERR_PTR(-EINVAL);

        if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
                err = -EINVAL;
                goto err_put;
        }

        /* Don't sleep if this is a non blocking call */
        if (!timeout) {
                if (!try_wait_for_completion(&ch->comp)) {
                        err = -EAGAIN;
                        goto err_put;
                }
        } else {
                riocm_debug(WAIT, "on %d", ch->id);

                wret = wait_for_completion_interruptible_timeout(&ch->comp,
                                                                 timeout);
                if (!wret) {
                        err = -ETIME;
                        goto err_put;
                } else if (wret == -ERESTARTSYS) {
                        err = -EINTR;
                        goto err_put;
                }
        }

        spin_lock_bh(&ch->lock);

        if (ch->state != RIO_CM_LISTEN) {
                err = -ECANCELED;
        } else if (list_empty(&ch->accept_queue)) {
                riocm_debug(WAIT, "on %d accept_queue is empty on completion",
                            ch->id);
                err = -EIO;
        }

        spin_unlock_bh(&ch->lock);

        if (err) {
                riocm_debug(WAIT, "on %d returns %d", ch->id, err);
                goto err_put;
        }

        /* Create new channel for this connection */
        new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);

        if (IS_ERR(new_ch)) {
                riocm_error("failed to get channel for new req (%ld)",
                            PTR_ERR(new_ch));
                err = -ENOMEM;
                goto err_put;
        }

        spin_lock_bh(&ch->lock);

        req = list_first_entry(&ch->accept_queue, struct conn_req, node);
        list_del(&req->node);
        new_ch->cmdev = ch->cmdev;
        new_ch->loc_destid = ch->loc_destid;
        new_ch->rem_destid = req->destid;
        new_ch->rem_channel = req->chan;

        spin_unlock_bh(&ch->lock);
        riocm_put_channel(ch);
        ch = NULL;
        kfree(req);

        down_read(&rdev_sem);
        /* Find requester's device object */
        list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
                if (peer->rdev->destid == new_ch->rem_destid) {
                        riocm_debug(RX_CMD, "found matching device(%s)",
                                    rio_name(peer->rdev));
                        found = 1;
                        break;
                }
        }
        up_read(&rdev_sem);

        if (!found) {
                /* If peer device object not found, simply ignore the request */
                err = -ENODEV;
                goto err_put_new_ch;
        }

        new_ch->rdev = peer->rdev;
        new_ch->state = RIO_CM_CONNECTED;
        spin_lock_init(&new_ch->lock);

        /* Acknowledge the connection request. */
        riocm_send_ack(new_ch);

        *new_ch_id = new_ch->id;
        return new_ch;

err_put_new_ch:
        spin_lock_bh(&idr_lock);
        idr_remove(&ch_idr, new_ch->id);
        spin_unlock_bh(&idr_lock);
        riocm_put_channel(new_ch);

err_put:
        if (ch)
                riocm_put_channel(ch);
        *new_ch_id = 0;
        return ERR_PTR(err);
}
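
/*
 * Putting the pieces together, the passive (server) side of a channel
 * is driven through the helpers in this file in the following order
 * (mirrored by the cm_chan_*() ioctl handlers further below):
 *
 *      ch = riocm_ch_create(&ch_num);          // allocate channel ID
 *      riocm_ch_bind(ch_num, mport_id, NULL);  // IDLE -> CHAN_BOUND
 *      riocm_ch_listen(ch_num);                // CHAN_BOUND -> LISTEN
 *      new_ch = riocm_ch_accept(ch_num, &new_ch_num, timeout);
 *
 * Each accepted request produces a new rio_channel in CONNECTED state,
 * while the listening channel keeps accepting further requests.
 */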

/*
 * riocm_ch_listen - puts a channel into LISTEN state
 * @ch_id: channel ID
 *
 * Returns: 0 if success, or
 *          -EINVAL if the specified channel does not exist or
 *                  is not in CHAN_BOUND state.
 */
static int riocm_ch_listen(u16 ch_id)
{
        struct rio_channel *ch = NULL;
        int ret = 0;

        riocm_debug(CHOP, "(ch_%d)", ch_id);

        ch = riocm_get_channel(ch_id);
        if (!ch)
                return -EINVAL;
        if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
                ret = -EINVAL;
        riocm_put_channel(ch);
        return ret;
}

/*
 * riocm_ch_bind - associate a channel object and an mport device
 * @ch_id: channel ID
 * @mport_id: local mport device ID
 * @context: pointer to the additional caller's context
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find specified mport,
 *          -EINVAL if the specified channel does not exist or
 *                  is not in IDLE state.
 */
static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
{
        struct rio_channel *ch = NULL;
        struct cm_dev *cm;
        int rc = -ENODEV;

        riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);

        /* Find matching cm_dev object */
        down_read(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list) {
                if ((cm->mport->id == mport_id) &&
                    rio_mport_is_running(cm->mport)) {
                        rc = 0;
                        break;
                }
        }

        if (rc)
                goto exit;

        ch = riocm_get_channel(ch_id);
        if (!ch) {
                rc = -EINVAL;
                goto exit;
        }

        spin_lock_bh(&ch->lock);
        if (ch->state != RIO_CM_IDLE) {
                spin_unlock_bh(&ch->lock);
                rc = -EINVAL;
                goto err_put;
        }

        ch->cmdev = cm;
        ch->loc_destid = cm->mport->host_deviceid;
        ch->context = context;
        ch->state = RIO_CM_CHAN_BOUND;
        spin_unlock_bh(&ch->lock);
err_put:
        riocm_put_channel(ch);
exit:
        up_read(&rdev_sem);
        return rc;
}

/*
 * riocm_ch_alloc - channel object allocation helper routine
 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
 *
 * Return value: pointer to newly created channel object,
 *               or error-valued pointer
 */
static struct rio_channel *riocm_ch_alloc(u16 ch_num)
{
        int id;
        int start, end;
        struct rio_channel *ch;

        ch = kzalloc(sizeof(*ch), GFP_KERNEL);
        if (!ch)
                return ERR_PTR(-ENOMEM);

        if (ch_num) {
                /* If requested, try to obtain the specified channel ID */
                start = ch_num;
                end = ch_num + 1;
        } else {
                /* Obtain channel ID from the dynamic allocation range */
                start = chstart;
                end = RIOCM_MAX_CHNUM + 1;
        }

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&idr_lock);
        id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
        spin_unlock_bh(&idr_lock);
        idr_preload_end();

        if (id < 0) {
                kfree(ch);
                return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
        }

        ch->id = (u16)id;
        ch->state = RIO_CM_IDLE;
        spin_lock_init(&ch->lock);
        INIT_LIST_HEAD(&ch->accept_queue);
        INIT_LIST_HEAD(&ch->ch_node);
        init_completion(&ch->comp);
        init_completion(&ch->comp_close);
        kref_init(&ch->ref);
        ch->rx_ring.head = 0;
        ch->rx_ring.tail = 0;
        ch->rx_ring.count = 0;
        ch->rx_ring.inuse_cnt = 0;

        return ch;
}

/*
 * riocm_ch_create - creates a new channel object and allocates ID for it
 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
 *
 * Allocates and initializes a new channel object. If the parameter ch_num > 0
 * and is within the valid range, riocm_ch_create tries to allocate the
 * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
 * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
 * Module parameter 'chstart' defines start of an ID range available for
 * dynamic allocation. Range below 'chstart' is reserved for pre-defined ID
 * numbers. Available channel numbers are limited by 16-bit size of channel
 * numbers used in the packet header.
 *
 * Return value: PTR to rio_channel structure if successful (with channel number
 *               updated via pointer) or error-valued pointer if error.
 */
static struct rio_channel *riocm_ch_create(u16 *ch_num)
{
        struct rio_channel *ch = NULL;

        ch = riocm_ch_alloc(*ch_num);

        if (IS_ERR(ch))
                riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
                            *ch_num, PTR_ERR(ch));
        else
                *ch_num = ch->id;

        return ch;
}

/*
 * riocm_ch_free - channel object release routine
 * @ref: pointer to a channel's kref structure
 */
static void riocm_ch_free(struct kref *ref)
{
        struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
        int i;

        riocm_debug(CHOP, "(ch_%d)", ch->id);

        if (ch->rx_ring.inuse_cnt) {
                for (i = 0;
                     i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
                        if (ch->rx_ring.inuse[i] != NULL) {
                                kfree(ch->rx_ring.inuse[i]);
                                ch->rx_ring.inuse_cnt--;
                        }
                }
        }

        if (ch->rx_ring.count)
                for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
                        if (ch->rx_ring.buf[i] != NULL) {
                                kfree(ch->rx_ring.buf[i]);
                                ch->rx_ring.count--;
                        }
                }

        complete(&ch->comp_close);
}

static int riocm_send_close(struct rio_channel *ch)
{
        struct rio_ch_chan_hdr *hdr;
        int ret;

        /*
         * Send CH_CLOSE notification to the remote RapidIO device
         */
        hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
        if (hdr == NULL)
                return -ENOMEM;

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(ch->rem_destid);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_CONN_CLOSE;
        hdr->dst_ch = htons(ch->rem_channel);
        hdr->src_ch = htons(ch->id);

        /* ATTN: the function call below relies on the fact that underlying
         * add_outb_message() routine copies TX data into its internal transfer
         * buffer. Needs to be reviewed if switched to direct buffer mode.
         */
        ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

        if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
                                              hdr, sizeof(*hdr)))
                return 0;
        kfree(hdr);

        if (ret)
                riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);

        return ret;
}

/*
 * riocm_ch_close - closes a channel object with specified ID (by local request)
 * @ch: channel to be closed
 */
static int riocm_ch_close(struct rio_channel *ch)
{
        unsigned long tmo = msecs_to_jiffies(3000);
        enum rio_cm_state state;
        long wret;
        int ret = 0;

        riocm_debug(CHOP, "ch_%d by %s(%d)",
                    ch->id, current->comm, task_pid_nr(current));

        state = riocm_exch(ch, RIO_CM_DESTROYING);
        if (state == RIO_CM_CONNECTED)
                riocm_send_close(ch);

        complete_all(&ch->comp);

        riocm_put_channel(ch);

        wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);

        riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

        if (wret == 0) {
                /* Timeout on wait occurred */
                riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
                            current->comm, task_pid_nr(current), ch->id);
                ret = -ETIMEDOUT;
        } else if (wret == -ERESTARTSYS) {
                /* Wait_for_completion was interrupted by a signal */
                riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
                            current->comm, task_pid_nr(current), ch->id);
                ret = -EINTR;
        }

        if (!ret) {
                riocm_debug(CHOP, "ch_%d resources released", ch->id);
                kfree(ch);
        } else {
                riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
        }

        return ret;
}

/*
 * riocm_cdev_open() - Open character device
 */
static int riocm_cdev_open(struct inode *inode, struct file *filp)
{
        riocm_debug(INIT, "by %s(%d) filp=%p ",
                    current->comm, task_pid_nr(current), filp);

        if (list_empty(&cm_dev_list))
                return -ENODEV;

        return 0;
}

/*
 * riocm_cdev_release() - Release character device
 */
static int riocm_cdev_release(struct inode *inode, struct file *filp)
{
        struct rio_channel *ch, *_c;
        unsigned int i;
        LIST_HEAD(list);

        riocm_debug(EXIT, "by %s(%d) filp=%p",
                    current->comm, task_pid_nr(current), filp);

        /* Check if there are channels associated with this file descriptor */
        spin_lock_bh(&idr_lock);
        idr_for_each_entry(&ch_idr, ch, i) {
                if (ch && ch->filp == filp) {
                        riocm_debug(EXIT, "ch_%d not released by %s(%d)",
                                    ch->id, current->comm,
                                    task_pid_nr(current));
                        idr_remove(&ch_idr, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
        spin_unlock_bh(&idr_lock);

        if (!list_empty(&list)) {
                list_for_each_entry_safe(ch, _c, &list, ch_node) {
                        list_del(&ch->ch_node);
                        riocm_ch_close(ch);
                }
        }

        return 0;
}

/*
 * cm_ep_get_list_size() - Reports number of endpoints in the network
 */
static int cm_ep_get_list_size(void __user *arg)
{
        u32 __user *p = arg;
        u32 mport_id;
        u32 count = 0;
        struct cm_dev *cm;

        if (get_user(mport_id, p))
                return -EFAULT;
        if (mport_id >= RIO_MAX_MPORTS)
                return -EINVAL;

        /* Find a matching cm_dev object */
        down_read(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list) {
                if (cm->mport->id == mport_id) {
                        count = cm->npeers;
                        up_read(&rdev_sem);
                        if (copy_to_user(arg, &count, sizeof(u32)))
                                return -EFAULT;
                        return 0;
                }
        }
        up_read(&rdev_sem);

        return -ENODEV;
}

/*
 * cm_ep_get_list() - Returns list of attached endpoints
 */
static int cm_ep_get_list(void __user *arg)
{
        struct cm_dev *cm;
        struct cm_peer *peer;
        u32 info[2];
        void *buf;
        u32 nent;
        u32 *entry_ptr;
        u32 i = 0;
        int ret = 0;

        if (copy_from_user(&info, arg, sizeof(info)))
                return -EFAULT;

        if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
                return -EINVAL;

        /* Find a matching cm_dev object */
        down_read(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list)
                if (cm->mport->id == (u8)info[1])
                        goto found;

        up_read(&rdev_sem);
        return -ENODEV;

found:
        nent = min(info[0], cm->npeers);
        buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
        if (!buf) {
                up_read(&rdev_sem);
                return -ENOMEM;
        }

        entry_ptr = (u32 *)((uintptr_t)buf + 2 * sizeof(u32));

        list_for_each_entry(peer, &cm->peers, node) {
                *entry_ptr = (u32)peer->rdev->destid;
                entry_ptr++;
                if (++i == nent)
                        break;
        }
        up_read(&rdev_sem);

        ((u32 *)buf)[0] = i; /* report an updated number of entries */
        ((u32 *)buf)[1] = info[1]; /* put back an mport ID */
        if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2)))
                ret = -EFAULT;

        kfree(buf);
        return ret;
}
  1357. /*
  1358. * cm_mport_get_list() - Returns list of available local mport devices
  1359. */
  1360. static int cm_mport_get_list(void __user *arg)
  1361. {
  1362. int ret = 0;
  1363. u32 entries;
  1364. void *buf;
  1365. struct cm_dev *cm;
  1366. u32 *entry_ptr;
  1367. int count = 0;
  1368. if (copy_from_user(&entries, arg, sizeof(entries)))
  1369. return -EFAULT;
  1370. if (entries == 0 || entries > RIO_MAX_MPORTS)
  1371. return -EINVAL;
  1372. buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
  1373. if (!buf)
  1374. return -ENOMEM;
  1375. /* Scan all registered cm_dev objects */
  1376. entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
  1377. down_read(&rdev_sem);
  1378. list_for_each_entry(cm, &cm_dev_list, list) {
  1379. if (count++ < entries) {
  1380. *entry_ptr = (cm->mport->id << 16) |
  1381. cm->mport->host_deviceid;
  1382. entry_ptr++;
  1383. }
  1384. }
  1385. up_read(&rdev_sem);
  1386. *((u32 *)buf) = count; /* report a real number of entries */
  1387. if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
  1388. ret = -EFAULT;
  1389. kfree(buf);
  1390. return ret;
  1391. }
  1392. /*
  1393. * cm_chan_create() - Create a message exchange channel
  1394. */
  1395. static int cm_chan_create(struct file *filp, void __user *arg)
  1396. {
  1397. u16 __user *p = arg;
  1398. u16 ch_num;
  1399. struct rio_channel *ch;
  1400. if (get_user(ch_num, p))
  1401. return -EFAULT;
  1402. riocm_debug(CHOP, "ch_%d requested by %s(%d)",
  1403. ch_num, current->comm, task_pid_nr(current));
  1404. ch = riocm_ch_create(&ch_num);
  1405. if (IS_ERR(ch))
  1406. return PTR_ERR(ch);
  1407. ch->filp = filp;
  1408. riocm_debug(CHOP, "ch_%d created by %s(%d)",
  1409. ch_num, current->comm, task_pid_nr(current));
  1410. return put_user(ch_num, p);
  1411. }
  1412. /*
  1413. * cm_chan_close() - Close channel
  1414. * @filp: Pointer to file object
  1415. * @arg: Channel to close
  1416. */
  1417. static int cm_chan_close(struct file *filp, void __user *arg)
  1418. {
  1419. u16 __user *p = arg;
  1420. u16 ch_num;
  1421. struct rio_channel *ch;
  1422. if (get_user(ch_num, p))
  1423. return -EFAULT;
  1424. riocm_debug(CHOP, "ch_%d by %s(%d)",
  1425. ch_num, current->comm, task_pid_nr(current));
  1426. spin_lock_bh(&idr_lock);
  1427. ch = idr_find(&ch_idr, ch_num);
  1428. if (!ch) {
  1429. spin_unlock_bh(&idr_lock);
  1430. return 0;
  1431. }
  1432. if (ch->filp != filp) {
  1433. spin_unlock_bh(&idr_lock);
  1434. return -EINVAL;
  1435. }
  1436. idr_remove(&ch_idr, ch->id);
  1437. spin_unlock_bh(&idr_lock);
  1438. return riocm_ch_close(ch);
  1439. }
  1440. /*
  1441. * cm_chan_bind() - Bind channel
  1442. * @arg: Channel number
  1443. */
  1444. static int cm_chan_bind(void __user *arg)
  1445. {
  1446. struct rio_cm_channel chan;
  1447. if (copy_from_user(&chan, arg, sizeof(chan)))
  1448. return -EFAULT;
  1449. if (chan.mport_id >= RIO_MAX_MPORTS)
  1450. return -EINVAL;
  1451. return riocm_ch_bind(chan.id, chan.mport_id, NULL);
  1452. }
/*
 * cm_chan_listen() - Listen on channel
 * @arg: Channel number
 */
static int cm_chan_listen(void __user *arg)
{
	u16 __user *p = arg;
	u16 ch_num;

	if (get_user(ch_num, p))
		return -EFAULT;

	return riocm_ch_listen(ch_num);
}
/*
 * cm_chan_accept() - Accept incoming connection
 * @filp: Pointer to file object
 * @arg: Accept request parameters (struct rio_cm_accept)
 */
static int cm_chan_accept(struct file *filp, void __user *arg)
{
	struct rio_cm_accept param;
	long accept_to;
	struct rio_channel *ch;

	if (copy_from_user(&param, arg, sizeof(param)))
		return -EFAULT;

	riocm_debug(CHOP, "on ch_%d by %s(%d)",
		    param.ch_num, current->comm, task_pid_nr(current));

	accept_to = param.wait_to ?
			msecs_to_jiffies(param.wait_to) : 0;

	ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
	if (IS_ERR(ch))
		return PTR_ERR(ch);
	ch->filp = filp;

	riocm_debug(CHOP, "new ch_%d for %s(%d)",
		    ch->id, current->comm, task_pid_nr(current));

	if (copy_to_user(arg, &param, sizeof(param)))
		return -EFAULT;
	return 0;
}
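/*
 * Editor's usage sketch (not part of the driver): the server-side sequence
 * implied by the handlers above — create, bind, listen, then accept — using
 * the structures from the rio_cm_cdev uapi header. fd is an open
 * /dev/rio_cm descriptor; mport and channel numbers are illustrative:
 *
 *	struct rio_cm_channel chan = { .mport_id = 0 };
 *	struct rio_cm_accept acc = { .wait_to = 0 };	// 0 = wait forever
 *	uint16_t ch = 0;				// 0 = any free channel
 *
 *	ioctl(fd, RIO_CM_CHAN_CREATE, &ch);	// ch now holds assigned id
 *	chan.id = ch;
 *	ioctl(fd, RIO_CM_CHAN_BIND, &chan);
 *	ioctl(fd, RIO_CM_CHAN_LISTEN, &ch);
 *	acc.ch_num = ch;
 *	ioctl(fd, RIO_CM_CHAN_ACCEPT, &acc);	// acc.ch_num = new channel
 */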
/*
 * cm_chan_connect() - Connect on channel
 * @arg: Channel information
 */
static int cm_chan_connect(void __user *arg)
{
	struct rio_cm_channel chan;
	struct cm_dev *cm;
	struct cm_peer *peer;
	int ret = -ENODEV;

	if (copy_from_user(&chan, arg, sizeof(chan)))
		return -EFAULT;
	if (chan.mport_id >= RIO_MAX_MPORTS)
		return -EINVAL;

	down_read(&rdev_sem);

	/* Find matching cm_dev object */
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport->id == chan.mport_id) {
			ret = 0;
			break;
		}
	}

	if (ret)
		goto err_out;

	if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
		ret = -EINVAL;
		goto err_out;
	}

	/* Find corresponding RapidIO endpoint device object */
	ret = -ENODEV;

	list_for_each_entry(peer, &cm->peers, node) {
		if (peer->rdev->destid == chan.remote_destid) {
			ret = 0;
			break;
		}
	}

	if (ret)
		goto err_out;

	up_read(&rdev_sem);

	return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
err_out:
	up_read(&rdev_sem);
	return ret;
}
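/*
 * Editor's usage sketch (not part of the driver): the client side of the
 * same exchange, connecting a freshly created channel to a remote
 * (destid, channel) pair; dest and srv_ch are illustrative values:
 *
 *	struct rio_cm_channel chan;
 *	uint16_t ch = 0;
 *
 *	ioctl(fd, RIO_CM_CHAN_CREATE, &ch);
 *	chan.id = ch;
 *	chan.mport_id = 0;		// local mport to send through
 *	chan.remote_destid = dest;	// peer's RapidIO destination ID
 *	chan.remote_channel = srv_ch;	// channel the server listens on
 *	ioctl(fd, RIO_CM_CHAN_CONNECT, &chan);
 */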
/*
 * cm_chan_msg_send() - Send a message through channel
 * @arg: Outbound message information
 */
static int cm_chan_msg_send(void __user *arg)
{
	struct rio_cm_msg msg;
	void *buf;
	int ret;

	if (copy_from_user(&msg, arg, sizeof(msg)))
		return -EFAULT;
	if (msg.size > RIO_MAX_MSG_SIZE)
		return -EINVAL;

	buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = riocm_ch_send(msg.ch_num, buf, msg.size);

	kfree(buf);
	return ret;
}
/*
 * cm_chan_msg_rcv() - Receive a message through channel
 * @arg: Inbound message information
 */
static int cm_chan_msg_rcv(void __user *arg)
{
	struct rio_cm_msg msg;
	struct rio_channel *ch;
	void *buf;
	long rxto;
	int ret = 0, msg_size;

	if (copy_from_user(&msg, arg, sizeof(msg)))
		return -EFAULT;

	if (msg.ch_num == 0 || msg.size == 0)
		return -EINVAL;

	ch = riocm_get_channel(msg.ch_num);
	if (!ch)
		return -ENODEV;

	rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;

	ret = riocm_ch_receive(ch, &buf, rxto);
	if (ret)
		goto out;

	msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));

	if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
		ret = -EFAULT;

	riocm_ch_free_rxbuf(ch, buf);
out:
	riocm_put_channel(ch);
	return ret;
}
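/*
 * Editor's usage sketch (not part of the driver): once a channel is
 * connected, send and receive are symmetric around struct rio_cm_msg,
 * which carries the channel number, a buffer pointer cast to a 64-bit
 * integer and a size (capped by RIO_MAX_MSG_SIZE in the handlers above):
 *
 *	struct rio_cm_msg m;
 *	char data[64] = "hello";
 *
 *	m.ch_num = ch;
 *	m.size = sizeof(data);
 *	m.msg = (uint64_t)(uintptr_t)data;
 *	ioctl(fd, RIO_CM_CHAN_SEND, &m);
 *
 *	m.rxto = 1000;			// receive timeout, ms (0 = no limit)
 *	ioctl(fd, RIO_CM_CHAN_RECEIVE, &m);	// fills data[] on success
 */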
/*
 * riocm_cdev_ioctl() - IOCTL requests handler
 */
static long
riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case RIO_CM_EP_GET_LIST_SIZE:
		return cm_ep_get_list_size((void __user *)arg);
	case RIO_CM_EP_GET_LIST:
		return cm_ep_get_list((void __user *)arg);
	case RIO_CM_CHAN_CREATE:
		return cm_chan_create(filp, (void __user *)arg);
	case RIO_CM_CHAN_CLOSE:
		return cm_chan_close(filp, (void __user *)arg);
	case RIO_CM_CHAN_BIND:
		return cm_chan_bind((void __user *)arg);
	case RIO_CM_CHAN_LISTEN:
		return cm_chan_listen((void __user *)arg);
	case RIO_CM_CHAN_ACCEPT:
		return cm_chan_accept(filp, (void __user *)arg);
	case RIO_CM_CHAN_CONNECT:
		return cm_chan_connect((void __user *)arg);
	case RIO_CM_CHAN_SEND:
		return cm_chan_msg_send((void __user *)arg);
	case RIO_CM_CHAN_RECEIVE:
		return cm_chan_msg_rcv((void __user *)arg);
	case RIO_CM_MPORT_GET_LIST:
		return cm_mport_get_list((void __user *)arg);
	default:
		break;
	}

	return -EINVAL;
}
static const struct file_operations riocm_cdev_fops = {
	.owner		= THIS_MODULE,
	.open		= riocm_cdev_open,
	.release	= riocm_cdev_release,
	.unlocked_ioctl = riocm_cdev_ioctl,
};
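/*
 * Editor's note (not part of the driver): every sketch above assumes a
 * descriptor obtained by opening the character device these fops back,
 * i.e. the /dev node created from DEV_NAME in riocm_cdev_add() below:
 *
 *	int fd = open("/dev/rio_cm", O_RDWR);
 *	if (fd < 0)
 *		err(1, "open /dev/rio_cm");
 */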
/*
 * riocm_add_dev - add new remote RapidIO device into channel management core
 * @dev: device object associated with RapidIO device
 * @sif: subsystem interface
 *
 * Adds the specified RapidIO device (if applicable) into the peers list of
 * the corresponding channel management device (cm_dev).
 */
static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cm_peer *peer;
	struct rio_dev *rdev = to_rio_dev(dev);
	struct cm_dev *cm;

	/* Check if the remote device has capabilities required to support CM */
	if (!dev_cm_capable(rdev))
		return 0;

	riocm_debug(RDEV, "(%s)", rio_name(rdev));

	peer = kmalloc(sizeof(*peer), GFP_KERNEL);
	if (!peer)
		return -ENOMEM;

	/* Find a corresponding cm_dev object */
	down_write(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport == rdev->net->hport)
			goto found;
	}

	up_write(&rdev_sem);
	kfree(peer);
	return -ENODEV;

found:
	peer->rdev = rdev;
	list_add_tail(&peer->node, &cm->peers);
	cm->npeers++;

	up_write(&rdev_sem);
	return 0;
}
/*
 * riocm_remove_dev - remove remote RapidIO device from channel management core
 * @dev: device object associated with RapidIO device
 * @sif: subsystem interface
 *
 * Removes the specified RapidIO device (if applicable) from the peers list of
 * the corresponding channel management device (cm_dev).
 */
static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	struct cm_dev *cm;
	struct cm_peer *peer;
	struct rio_channel *ch, *_c;
	unsigned int i;
	bool found = false;
	LIST_HEAD(list);

	/* Check if the remote device has capabilities required to support CM */
	if (!dev_cm_capable(rdev))
		return;

	riocm_debug(RDEV, "(%s)", rio_name(rdev));

	/* Find matching cm_dev object */
	down_write(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport == rdev->net->hport) {
			found = true;
			break;
		}
	}

	if (!found) {
		up_write(&rdev_sem);
		return;
	}

	/* Remove remote device from the list of peers */
	found = false;
	list_for_each_entry(peer, &cm->peers, node) {
		if (peer->rdev == rdev) {
			riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
			found = true;
			list_del(&peer->node);
			cm->npeers--;
			kfree(peer);
			break;
		}
	}

	up_write(&rdev_sem);

	if (!found)
		return;

	/*
	 * Release channels associated with this peer
	 */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch && ch->rdev == rdev) {
			if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
				riocm_exch(ch, RIO_CM_DISCONNECT);
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	if (!list_empty(&list)) {
		list_for_each_entry_safe(ch, _c, &list, ch_node) {
			list_del(&ch->ch_node);
			riocm_ch_close(ch);
		}
	}
}
/*
 * riocm_cdev_add() - Create rio_cm char device
 * @devno: device number assigned to device (MAJ + MIN)
 */
static int riocm_cdev_add(dev_t devno)
{
	int ret;

	cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
	riocm_cdev.cdev.owner = THIS_MODULE;
	ret = cdev_add(&riocm_cdev.cdev, devno, 1);
	if (ret < 0) {
		riocm_error("cannot register device (err=%d)", ret);
		return ret;
	}

	riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
	if (IS_ERR(riocm_cdev.dev)) {
		cdev_del(&riocm_cdev.cdev);
		return PTR_ERR(riocm_cdev.dev);
	}

	riocm_debug(MPORT, "Added %s cdev(%d:%d)",
		    DEV_NAME, MAJOR(devno), MINOR(devno));

	return 0;
}
/*
 * riocm_add_mport - add new local mport device into channel management core
 * @dev: device object associated with mport
 * @class_intf: class interface
 *
 * When a new mport device is added, CM immediately reserves inbound and
 * outbound RapidIO mailboxes that will be used.
 */
static int riocm_add_mport(struct device *dev,
			   struct class_interface *class_intf)
{
	int rc;
	int i;
	struct cm_dev *cm;
	struct rio_mport *mport = to_rio_mport(dev);

	riocm_debug(MPORT, "add mport %s", mport->name);

	cm = kzalloc(sizeof(*cm), GFP_KERNEL);
	if (!cm)
		return -ENOMEM;

	cm->mport = mport;

	rc = rio_request_outb_mbox(mport, cm, cmbox,
				   RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
	if (rc) {
		riocm_error("failed to allocate OBMBOX_%d on %s",
			    cmbox, mport->name);
		kfree(cm);
		return -ENODEV;
	}

	rc = rio_request_inb_mbox(mport, cm, cmbox,
				  RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
	if (rc) {
		riocm_error("failed to allocate IBMBOX_%d on %s",
			    cmbox, mport->name);
		rio_release_outb_mbox(mport, cmbox);
		kfree(cm);
		return -ENODEV;
	}

	cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
	if (!cm->rx_wq) {
		rio_release_inb_mbox(mport, cmbox);
		rio_release_outb_mbox(mport, cmbox);
		kfree(cm);
		return -ENOMEM;
	}

	/*
	 * Allocate and register inbound messaging buffers to be ready
	 * to receive channel and system management requests
	 */
	for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
		cm->rx_buf[i] = NULL;

	cm->rx_slots = RIOCM_RX_RING_SIZE;
	mutex_init(&cm->rx_lock);
	riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
	INIT_WORK(&cm->rx_work, rio_ibmsg_handler);

	cm->tx_slot = 0;
	cm->tx_cnt = 0;
	cm->tx_ack_slot = 0;
	spin_lock_init(&cm->tx_lock);
	INIT_LIST_HEAD(&cm->peers);
	cm->npeers = 0;
	INIT_LIST_HEAD(&cm->tx_reqs);

	down_write(&rdev_sem);
	list_add_tail(&cm->list, &cm_dev_list);
	up_write(&rdev_sem);

	return 0;
}
/*
 * riocm_remove_mport - remove local mport device from channel management core
 * @dev: device object associated with mport
 * @class_intf: class interface
 *
 * Removes a local mport device from the list of registered devices that
 * provide channel management services. Does nothing if the specified mport
 * is not registered with the CM core.
 */
static void riocm_remove_mport(struct device *dev,
			       struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct cm_dev *cm;
	struct cm_peer *peer, *temp;
	struct rio_channel *ch, *_c;
	unsigned int i;
	bool found = false;
	LIST_HEAD(list);

	riocm_debug(MPORT, "%s", mport->name);

	/* Find a matching cm_dev object */
	down_write(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport == mport) {
			list_del(&cm->list);
			found = true;
			break;
		}
	}
	up_write(&rdev_sem);
	if (!found)
		return;

	flush_workqueue(cm->rx_wq);
	destroy_workqueue(cm->rx_wq);

	/* Release channels bound to this mport */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch->cmdev == cm) {
			riocm_debug(RDEV, "%s drop ch_%d",
				    mport->name, ch->id);
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	if (!list_empty(&list)) {
		list_for_each_entry_safe(ch, _c, &list, ch_node) {
			list_del(&ch->ch_node);
			riocm_ch_close(ch);
		}
	}

	rio_release_inb_mbox(mport, cmbox);
	rio_release_outb_mbox(mport, cmbox);

	/* Remove and free peer entries */
	if (!list_empty(&cm->peers))
		riocm_debug(RDEV, "ATTN: peer list not empty");
	list_for_each_entry_safe(peer, temp, &cm->peers, node) {
		riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
		list_del(&peer->node);
		kfree(peer);
	}

	riocm_rx_free(cm);
	kfree(cm);
	riocm_debug(MPORT, "%s done", mport->name);
}
static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rio_channel *ch;
	unsigned int i;
	LIST_HEAD(list);

	riocm_debug(EXIT, ".");

	/*
	 * If there are any channels left in connected state, send a close
	 * notification to the connection partner. First build a list of
	 * channels that require a closing notification, because
	 * riocm_send_close() must be called outside of spinlock-protected
	 * code.
	 */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch->state == RIO_CM_CONNECTED) {
			riocm_debug(EXIT, "close ch %d", ch->id);
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	list_for_each_entry(ch, &list, ch_node)
		riocm_send_close(ch);

	return NOTIFY_DONE;
}
/*
 * riocm_interface handles addition/removal of remote RapidIO devices
 */
static struct subsys_interface riocm_interface = {
	.name		= "rio_cm",
	.subsys		= &rio_bus_type,
	.add_dev	= riocm_add_dev,
	.remove_dev	= riocm_remove_dev,
};
/*
 * rio_mport_interface handles addition/removal of local mport devices
 */
static struct class_interface rio_mport_interface __refdata = {
	.class		= &rio_mport_class,
	.add_dev	= riocm_add_mport,
	.remove_dev	= riocm_remove_mport,
};
static struct notifier_block rio_cm_notifier = {
	.notifier_call = rio_cm_shutdown,
};
static int __init riocm_init(void)
{
	int ret;

	/* Create device class needed by udev */
	dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(dev_class)) {
		riocm_error("Cannot create " DRV_NAME " class");
		return PTR_ERR(dev_class);
	}

	ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
	if (ret) {
		class_destroy(dev_class);
		return ret;
	}

	dev_major = MAJOR(dev_number);
	dev_minor_base = MINOR(dev_number);
	riocm_debug(INIT, "Registered char device region, major=%d", dev_major);

	/*
	 * Register as rapidio_port class interface to get notifications about
	 * mport additions and removals.
	 */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		riocm_error("class_interface_register error: %d", ret);
		goto err_reg;
	}

	/*
	 * Register as RapidIO bus interface to get notifications about
	 * addition/removal of remote RapidIO devices.
	 */
	ret = subsys_interface_register(&riocm_interface);
	if (ret) {
		riocm_error("subsys_interface_register error: %d", ret);
		goto err_cl;
	}

	ret = register_reboot_notifier(&rio_cm_notifier);
	if (ret) {
		riocm_error("failed to register reboot notifier (err=%d)", ret);
		goto err_sif;
	}

	ret = riocm_cdev_add(dev_number);
	if (ret) {
		unregister_reboot_notifier(&rio_cm_notifier);
		ret = -ENODEV;
		goto err_sif;
	}

	return 0;
err_sif:
	subsys_interface_unregister(&riocm_interface);
err_cl:
	class_interface_unregister(&rio_mport_interface);
err_reg:
	unregister_chrdev_region(dev_number, 1);
	class_destroy(dev_class);
	return ret;
}
static void __exit riocm_exit(void)
{
	riocm_debug(EXIT, "enter");
	unregister_reboot_notifier(&rio_cm_notifier);
	subsys_interface_unregister(&riocm_interface);
	class_interface_unregister(&rio_mport_interface);
	idr_destroy(&ch_idr);

	device_unregister(riocm_cdev.dev);
	cdev_del(&riocm_cdev.cdev);

	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, 1);
}

late_initcall(riocm_init);
module_exit(riocm_exit);