ncsi-manage.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright Gavin Shan, IBM Corporation 2016.
  4. */
  5. #include <linux/module.h>
  6. #include <linux/kernel.h>
  7. #include <linux/init.h>
  8. #include <linux/netdevice.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/of.h>
  11. #include <linux/platform_device.h>
  12. #include <net/ncsi.h>
  13. #include <net/net_namespace.h>
  14. #include <net/sock.h>
  15. #include <net/addrconf.h>
  16. #include <net/ipv6.h>
  17. #include <net/genetlink.h>
  18. #include "internal.h"
  19. #include "ncsi-pkt.h"
  20. #include "ncsi-netlink.h"
  21. LIST_HEAD(ncsi_dev_list);
  22. DEFINE_SPINLOCK(ncsi_dev_lock);
  23. bool ncsi_channel_has_link(struct ncsi_channel *channel)
  24. {
  25. return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
  26. }
  27. bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
  28. struct ncsi_channel *channel)
  29. {
  30. struct ncsi_package *np;
  31. struct ncsi_channel *nc;
  32. NCSI_FOR_EACH_PACKAGE(ndp, np)
  33. NCSI_FOR_EACH_CHANNEL(np, nc) {
  34. if (nc == channel)
  35. continue;
  36. if (nc->state == NCSI_CHANNEL_ACTIVE &&
  37. ncsi_channel_has_link(nc))
  38. return false;
  39. }
  40. return true;
  41. }
  42. static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
  43. {
  44. struct ncsi_dev *nd = &ndp->ndev;
  45. struct ncsi_package *np;
  46. struct ncsi_channel *nc;
  47. unsigned long flags;
  48. nd->state = ncsi_dev_state_functional;
  49. if (force_down) {
  50. nd->link_up = 0;
  51. goto report;
  52. }
  53. nd->link_up = 0;
  54. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  55. NCSI_FOR_EACH_CHANNEL(np, nc) {
  56. spin_lock_irqsave(&nc->lock, flags);
  57. if (!list_empty(&nc->link) ||
  58. nc->state != NCSI_CHANNEL_ACTIVE) {
  59. spin_unlock_irqrestore(&nc->lock, flags);
  60. continue;
  61. }
  62. if (ncsi_channel_has_link(nc)) {
  63. spin_unlock_irqrestore(&nc->lock, flags);
  64. nd->link_up = 1;
  65. goto report;
  66. }
  67. spin_unlock_irqrestore(&nc->lock, flags);
  68. }
  69. }
  70. report:
  71. nd->handler(nd);
  72. }
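/* Per-channel monitor timer. While enabled it fires once a second,
 * sending a Get Link Status (GLS) command in the START/RETRY states
 * and then waiting for the response. If the WAIT window is exhausted
 * without a response, the channel is reported link-down, flagged for
 * a reshuffle and re-queued for reconfiguration.
 */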
  73. static void ncsi_channel_monitor(struct timer_list *t)
  74. {
  75. struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
  76. struct ncsi_package *np = nc->package;
  77. struct ncsi_dev_priv *ndp = np->ndp;
  78. struct ncsi_channel_mode *ncm;
  79. struct ncsi_cmd_arg nca;
  80. bool enabled, chained;
  81. unsigned int monitor_state;
  82. unsigned long flags;
  83. int state, ret;
  84. spin_lock_irqsave(&nc->lock, flags);
  85. state = nc->state;
  86. chained = !list_empty(&nc->link);
  87. enabled = nc->monitor.enabled;
  88. monitor_state = nc->monitor.state;
  89. spin_unlock_irqrestore(&nc->lock, flags);
  90. if (!enabled)
  91. return; /* expected race disabling timer */
  92. if (WARN_ON_ONCE(chained))
  93. goto bad_state;
  94. if (state != NCSI_CHANNEL_INACTIVE &&
  95. state != NCSI_CHANNEL_ACTIVE) {
  96. bad_state:
  97. netdev_warn(ndp->ndev.dev,
  98. "Bad NCSI monitor state channel %d 0x%x %s queue\n",
  99. nc->id, state, chained ? "on" : "off");
  100. spin_lock_irqsave(&nc->lock, flags);
  101. nc->monitor.enabled = false;
  102. spin_unlock_irqrestore(&nc->lock, flags);
  103. return;
  104. }
  105. switch (monitor_state) {
  106. case NCSI_CHANNEL_MONITOR_START:
  107. case NCSI_CHANNEL_MONITOR_RETRY:
  108. nca.ndp = ndp;
  109. nca.package = np->id;
  110. nca.channel = nc->id;
  111. nca.type = NCSI_PKT_CMD_GLS;
  112. nca.req_flags = 0;
  113. ret = ncsi_xmit_cmd(&nca);
  114. if (ret)
  115. netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
  116. ret);
  117. break;
  118. case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
  119. break;
  120. default:
  121. netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
  122. nc->id);
  123. ncsi_report_link(ndp, true);
  124. ndp->flags |= NCSI_DEV_RESHUFFLE;
  125. ncm = &nc->modes[NCSI_MODE_LINK];
  126. spin_lock_irqsave(&nc->lock, flags);
  127. nc->monitor.enabled = false;
  128. nc->state = NCSI_CHANNEL_INVISIBLE;
  129. ncm->data[2] &= ~0x1;
  130. spin_unlock_irqrestore(&nc->lock, flags);
  131. spin_lock_irqsave(&ndp->lock, flags);
  132. nc->state = NCSI_CHANNEL_ACTIVE;
  133. list_add_tail_rcu(&nc->link, &ndp->channel_queue);
  134. spin_unlock_irqrestore(&ndp->lock, flags);
  135. ncsi_process_next_channel(ndp);
  136. return;
  137. }
  138. spin_lock_irqsave(&nc->lock, flags);
  139. nc->monitor.state++;
  140. spin_unlock_irqrestore(&nc->lock, flags);
  141. mod_timer(&nc->monitor.timer, jiffies + HZ);
  142. }
  143. void ncsi_start_channel_monitor(struct ncsi_channel *nc)
  144. {
  145. unsigned long flags;
  146. spin_lock_irqsave(&nc->lock, flags);
  147. WARN_ON_ONCE(nc->monitor.enabled);
  148. nc->monitor.enabled = true;
  149. nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
  150. spin_unlock_irqrestore(&nc->lock, flags);
  151. mod_timer(&nc->monitor.timer, jiffies + HZ);
  152. }
  153. void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
  154. {
  155. unsigned long flags;
  156. spin_lock_irqsave(&nc->lock, flags);
  157. if (!nc->monitor.enabled) {
  158. spin_unlock_irqrestore(&nc->lock, flags);
  159. return;
  160. }
  161. nc->monitor.enabled = false;
  162. spin_unlock_irqrestore(&nc->lock, flags);
  163. del_timer_sync(&nc->monitor.timer);
  164. }
  165. struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
  166. unsigned char id)
  167. {
  168. struct ncsi_channel *nc;
  169. NCSI_FOR_EACH_CHANNEL(np, nc) {
  170. if (nc->id == id)
  171. return nc;
  172. }
  173. return NULL;
  174. }
  175. struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
  176. {
  177. struct ncsi_channel *nc, *tmp;
  178. int index;
  179. unsigned long flags;
  180. nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
  181. if (!nc)
  182. return NULL;
  183. nc->id = id;
  184. nc->package = np;
  185. nc->state = NCSI_CHANNEL_INACTIVE;
  186. nc->monitor.enabled = false;
  187. timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
  188. spin_lock_init(&nc->lock);
  189. INIT_LIST_HEAD(&nc->link);
  190. for (index = 0; index < NCSI_CAP_MAX; index++)
  191. nc->caps[index].index = index;
  192. for (index = 0; index < NCSI_MODE_MAX; index++)
  193. nc->modes[index].index = index;
  194. spin_lock_irqsave(&np->lock, flags);
  195. tmp = ncsi_find_channel(np, id);
  196. if (tmp) {
  197. spin_unlock_irqrestore(&np->lock, flags);
  198. kfree(nc);
  199. return tmp;
  200. }
  201. list_add_tail_rcu(&nc->node, &np->channels);
  202. np->channel_num++;
  203. spin_unlock_irqrestore(&np->lock, flags);
  204. return nc;
  205. }
  206. static void ncsi_remove_channel(struct ncsi_channel *nc)
  207. {
  208. struct ncsi_package *np = nc->package;
  209. unsigned long flags;
  210. spin_lock_irqsave(&nc->lock, flags);
  211. /* Release filters */
  212. kfree(nc->mac_filter.addrs);
  213. kfree(nc->vlan_filter.vids);
  214. nc->state = NCSI_CHANNEL_INACTIVE;
  215. spin_unlock_irqrestore(&nc->lock, flags);
  216. ncsi_stop_channel_monitor(nc);
  217. /* Remove and free channel */
  218. spin_lock_irqsave(&np->lock, flags);
  219. list_del_rcu(&nc->node);
  220. np->channel_num--;
  221. spin_unlock_irqrestore(&np->lock, flags);
  222. kfree(nc);
  223. }
  224. struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
  225. unsigned char id)
  226. {
  227. struct ncsi_package *np;
  228. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  229. if (np->id == id)
  230. return np;
  231. }
  232. return NULL;
  233. }
  234. struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
  235. unsigned char id)
  236. {
  237. struct ncsi_package *np, *tmp;
  238. unsigned long flags;
  239. np = kzalloc(sizeof(*np), GFP_ATOMIC);
  240. if (!np)
  241. return NULL;
  242. np->id = id;
  243. np->ndp = ndp;
  244. spin_lock_init(&np->lock);
  245. INIT_LIST_HEAD(&np->channels);
  246. np->channel_whitelist = UINT_MAX;
  247. spin_lock_irqsave(&ndp->lock, flags);
  248. tmp = ncsi_find_package(ndp, id);
  249. if (tmp) {
  250. spin_unlock_irqrestore(&ndp->lock, flags);
  251. kfree(np);
  252. return tmp;
  253. }
  254. list_add_tail_rcu(&np->node, &ndp->packages);
  255. ndp->package_num++;
  256. spin_unlock_irqrestore(&ndp->lock, flags);
  257. return np;
  258. }
  259. void ncsi_remove_package(struct ncsi_package *np)
  260. {
  261. struct ncsi_dev_priv *ndp = np->ndp;
  262. struct ncsi_channel *nc, *tmp;
  263. unsigned long flags;
  264. /* Release all child channels */
  265. list_for_each_entry_safe(nc, tmp, &np->channels, node)
  266. ncsi_remove_channel(nc);
  267. /* Remove and free package */
  268. spin_lock_irqsave(&ndp->lock, flags);
  269. list_del_rcu(&np->node);
  270. ndp->package_num--;
  271. spin_unlock_irqrestore(&ndp->lock, flags);
  272. kfree(np);
  273. }
  274. void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
  275. unsigned char id,
  276. struct ncsi_package **np,
  277. struct ncsi_channel **nc)
  278. {
  279. struct ncsi_package *p;
  280. struct ncsi_channel *c;
  281. p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
  282. c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
  283. if (np)
  284. *np = p;
  285. if (nc)
  286. *nc = c;
  287. }
  288. /* For two consecutive NCSI commands, the packet IDs shouldn't
  289. * be the same; otherwise a stale response could be matched to the
  290. * wrong request, so the available IDs are allocated in round-robin fashion.
  291. */
  292. struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
  293. unsigned int req_flags)
  294. {
  295. struct ncsi_request *nr = NULL;
  296. int i, limit = ARRAY_SIZE(ndp->requests);
  297. unsigned long flags;
  298. /* Check if there is an available request up to the ceiling */
  299. spin_lock_irqsave(&ndp->lock, flags);
  300. for (i = ndp->request_id; i < limit; i++) {
  301. if (ndp->requests[i].used)
  302. continue;
  303. nr = &ndp->requests[i];
  304. nr->used = true;
  305. nr->flags = req_flags;
  306. ndp->request_id = i + 1;
  307. goto found;
  308. }
  309. /* Fall back to checking from the starting cursor */
  310. for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
  311. if (ndp->requests[i].used)
  312. continue;
  313. nr = &ndp->requests[i];
  314. nr->used = true;
  315. nr->flags = req_flags;
  316. ndp->request_id = i + 1;
  317. goto found;
  318. }
  319. found:
  320. spin_unlock_irqrestore(&ndp->lock, flags);
  321. return nr;
  322. }
  323. void ncsi_free_request(struct ncsi_request *nr)
  324. {
  325. struct ncsi_dev_priv *ndp = nr->ndp;
  326. struct sk_buff *cmd, *rsp;
  327. unsigned long flags;
  328. bool driven;
  329. if (nr->enabled) {
  330. nr->enabled = false;
  331. del_timer_sync(&nr->timer);
  332. }
  333. spin_lock_irqsave(&ndp->lock, flags);
  334. cmd = nr->cmd;
  335. rsp = nr->rsp;
  336. nr->cmd = NULL;
  337. nr->rsp = NULL;
  338. nr->used = false;
  339. driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
  340. spin_unlock_irqrestore(&ndp->lock, flags);
  341. if (driven && cmd && --ndp->pending_req_num == 0)
  342. schedule_work(&ndp->work);
  343. /* Release command and response */
  344. consume_skb(cmd);
  345. consume_skb(rsp);
  346. }
  347. struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
  348. {
  349. struct ncsi_dev_priv *ndp;
  350. NCSI_FOR_EACH_DEV(ndp) {
  351. if (ndp->ndev.dev == dev)
  352. return &ndp->ndev;
  353. }
  354. return NULL;
  355. }
  356. static void ncsi_request_timeout(struct timer_list *t)
  357. {
  358. struct ncsi_request *nr = from_timer(nr, t, timer);
  359. struct ncsi_dev_priv *ndp = nr->ndp;
  360. struct ncsi_cmd_pkt *cmd;
  361. struct ncsi_package *np;
  362. struct ncsi_channel *nc;
  363. unsigned long flags;
  364. /* If the request already has an associated response,
  365. * let the response handler release it.
  366. */
  367. spin_lock_irqsave(&ndp->lock, flags);
  368. nr->enabled = false;
  369. if (nr->rsp || !nr->cmd) {
  370. spin_unlock_irqrestore(&ndp->lock, flags);
  371. return;
  372. }
  373. spin_unlock_irqrestore(&ndp->lock, flags);
  374. if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
  375. if (nr->cmd) {
  376. /* Find the package */
  377. cmd = (struct ncsi_cmd_pkt *)
  378. skb_network_header(nr->cmd);
  379. ncsi_find_package_and_channel(ndp,
  380. cmd->cmd.common.channel,
  381. &np, &nc);
  382. ncsi_send_netlink_timeout(nr, np, nc);
  383. }
  384. }
  385. /* Release the request */
  386. ncsi_free_request(nr);
  387. }
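/* Suspend state machine for the active channel: select its package,
 * refresh link status (GLS) first if a reshuffle is pending, disable
 * the channel's network Tx (DCNT), disable the channel (DC), and
 * deselect the package (DP) unless another channel in it is still
 * active.
 */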
  388. static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
  389. {
  390. struct ncsi_dev *nd = &ndp->ndev;
  391. struct ncsi_package *np;
  392. struct ncsi_channel *nc, *tmp;
  393. struct ncsi_cmd_arg nca;
  394. unsigned long flags;
  395. int ret;
  396. np = ndp->active_package;
  397. nc = ndp->active_channel;
  398. nca.ndp = ndp;
  399. nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
  400. switch (nd->state) {
  401. case ncsi_dev_state_suspend:
  402. nd->state = ncsi_dev_state_suspend_select;
  403. fallthrough;
  404. case ncsi_dev_state_suspend_select:
  405. ndp->pending_req_num = 1;
  406. nca.type = NCSI_PKT_CMD_SP;
  407. nca.package = np->id;
  408. nca.channel = NCSI_RESERVED_CHANNEL;
  409. if (ndp->flags & NCSI_DEV_HWA)
  410. nca.bytes[0] = 0;
  411. else
  412. nca.bytes[0] = 1;
  413. /* Retrieve the last link states of the channels in the current
  414. * package when the active channel needs to fail over to
  415. * another one. We will possibly select another channel as the
  416. * next active one, and the channels' link states are the most
  417. * important factor in that selection, so we need accurate link
  418. * states. Unfortunately, the link states of inactive channels
  419. * can't be updated by LSC AENs in time.
  420. */
  421. if (ndp->flags & NCSI_DEV_RESHUFFLE)
  422. nd->state = ncsi_dev_state_suspend_gls;
  423. else
  424. nd->state = ncsi_dev_state_suspend_dcnt;
  425. ret = ncsi_xmit_cmd(&nca);
  426. if (ret)
  427. goto error;
  428. break;
  429. case ncsi_dev_state_suspend_gls:
  430. ndp->pending_req_num = np->channel_num;
  431. nca.type = NCSI_PKT_CMD_GLS;
  432. nca.package = np->id;
  433. nd->state = ncsi_dev_state_suspend_dcnt;
  434. NCSI_FOR_EACH_CHANNEL(np, nc) {
  435. nca.channel = nc->id;
  436. ret = ncsi_xmit_cmd(&nca);
  437. if (ret)
  438. goto error;
  439. }
  440. break;
  441. case ncsi_dev_state_suspend_dcnt:
  442. ndp->pending_req_num = 1;
  443. nca.type = NCSI_PKT_CMD_DCNT;
  444. nca.package = np->id;
  445. nca.channel = nc->id;
  446. nd->state = ncsi_dev_state_suspend_dc;
  447. ret = ncsi_xmit_cmd(&nca);
  448. if (ret)
  449. goto error;
  450. break;
  451. case ncsi_dev_state_suspend_dc:
  452. ndp->pending_req_num = 1;
  453. nca.type = NCSI_PKT_CMD_DC;
  454. nca.package = np->id;
  455. nca.channel = nc->id;
  456. nca.bytes[0] = 1;
  457. nd->state = ncsi_dev_state_suspend_deselect;
  458. ret = ncsi_xmit_cmd(&nca);
  459. if (ret)
  460. goto error;
  461. NCSI_FOR_EACH_CHANNEL(np, tmp) {
  462. /* If there is another channel active on this package
  463. * do not deselect the package.
  464. */
  465. if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
  466. nd->state = ncsi_dev_state_suspend_done;
  467. break;
  468. }
  469. }
  470. break;
  471. case ncsi_dev_state_suspend_deselect:
  472. ndp->pending_req_num = 1;
  473. nca.type = NCSI_PKT_CMD_DP;
  474. nca.package = np->id;
  475. nca.channel = NCSI_RESERVED_CHANNEL;
  476. nd->state = ncsi_dev_state_suspend_done;
  477. ret = ncsi_xmit_cmd(&nca);
  478. if (ret)
  479. goto error;
  480. break;
  481. case ncsi_dev_state_suspend_done:
  482. spin_lock_irqsave(&nc->lock, flags);
  483. nc->state = NCSI_CHANNEL_INACTIVE;
  484. spin_unlock_irqrestore(&nc->lock, flags);
  485. if (ndp->flags & NCSI_DEV_RESET)
  486. ncsi_reset_dev(nd);
  487. else
  488. ncsi_process_next_channel(ndp);
  489. break;
  490. default:
  491. netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
  492. nd->state);
  493. }
  494. return;
  495. error:
  496. nd->state = ncsi_dev_state_functional;
  497. }
  498. /* Check the VLAN filter bitmap for a set filter, and construct a
  499. * "Set VLAN Filter - Disable" packet if found.
  500. */
  501. static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
  502. struct ncsi_cmd_arg *nca)
  503. {
  504. struct ncsi_channel_vlan_filter *ncf;
  505. unsigned long flags;
  506. void *bitmap;
  507. int index;
  508. u16 vid;
  509. ncf = &nc->vlan_filter;
  510. bitmap = &ncf->bitmap;
  511. spin_lock_irqsave(&nc->lock, flags);
  512. index = find_next_bit(bitmap, ncf->n_vids, 0);
  513. if (index >= ncf->n_vids) {
  514. spin_unlock_irqrestore(&nc->lock, flags);
  515. return -1;
  516. }
  517. vid = ncf->vids[index];
  518. clear_bit(index, bitmap);
  519. ncf->vids[index] = 0;
  520. spin_unlock_irqrestore(&nc->lock, flags);
  521. nca->type = NCSI_PKT_CMD_SVF;
  522. nca->words[1] = vid;
  523. /* HW filter index starts at 1 */
  524. nca->bytes[6] = index + 1;
  525. nca->bytes[7] = 0x00;
  526. return 0;
  527. }
  528. /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
  529. * packet.
  530. */
  531. static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
  532. struct ncsi_cmd_arg *nca)
  533. {
  534. struct ncsi_channel_vlan_filter *ncf;
  535. struct vlan_vid *vlan = NULL;
  536. unsigned long flags;
  537. int i, index;
  538. void *bitmap;
  539. u16 vid;
  540. if (list_empty(&ndp->vlan_vids))
  541. return -1;
  542. ncf = &nc->vlan_filter;
  543. bitmap = &ncf->bitmap;
  544. spin_lock_irqsave(&nc->lock, flags);
  545. rcu_read_lock();
  546. list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
  547. vid = vlan->vid;
  548. for (i = 0; i < ncf->n_vids; i++)
  549. if (ncf->vids[i] == vid) {
  550. vid = 0;
  551. break;
  552. }
  553. if (vid)
  554. break;
  555. }
  556. rcu_read_unlock();
  557. if (!vid) {
  558. /* No VLAN ID found that isn't already set */
  559. spin_unlock_irqrestore(&nc->lock, flags);
  560. return -1;
  561. }
  562. index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
  563. if (index < 0 || index >= ncf->n_vids) {
  564. netdev_err(ndp->ndev.dev,
  565. "Channel %u already has all VLAN filters set\n",
  566. nc->id);
  567. spin_unlock_irqrestore(&nc->lock, flags);
  568. return -1;
  569. }
  570. ncf->vids[index] = vid;
  571. set_bit(index, bitmap);
  572. spin_unlock_irqrestore(&nc->lock, flags);
  573. nca->type = NCSI_PKT_CMD_SVF;
  574. nca->words[1] = vid;
  575. /* HW filter index starts at 1 */
  576. nca->bytes[6] = index + 1;
  577. nca->bytes[7] = 0x01;
  578. return 0;
  579. }
  580. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
  581. /* NCSI OEM Command APIs */
  582. static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
  583. {
  584. unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
  585. int ret = 0;
  586. nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
  587. memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
  588. *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
  589. data[5] = NCSI_OEM_BCM_CMD_GMA;
  590. nca->data = data;
  591. ret = ncsi_xmit_cmd(nca);
  592. if (ret)
  593. netdev_err(nca->ndp->ndev.dev,
  594. "NCSI: Failed to transmit cmd 0x%x during configure\n",
  595. nca->type);
  596. return ret;
  597. }
  598. static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
  599. {
  600. union {
  601. u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
  602. u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
  603. } u;
  604. int ret = 0;
  605. nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
  606. memset(&u, 0, sizeof(u));
  607. u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
  608. u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
  609. u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
  610. nca->data = u.data_u8;
  611. ret = ncsi_xmit_cmd(nca);
  612. if (ret)
  613. netdev_err(nca->ndp->ndev.dev,
  614. "NCSI: Failed to transmit cmd 0x%x during configure\n",
  615. nca->type);
  616. return ret;
  617. }
  618. static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
  619. {
  620. union {
  621. u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
  622. u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
  623. } u;
  624. int ret = 0;
  625. memset(&u, 0, sizeof(u));
  626. u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
  627. u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
  628. u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
  629. memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
  630. nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
  631. u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
  632. (MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
  633. nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
  634. nca->data = u.data_u8;
  635. ret = ncsi_xmit_cmd(nca);
  636. if (ret)
  637. netdev_err(nca->ndp->ndev.dev,
  638. "NCSI: Failed to transmit cmd 0x%x during probe\n",
  639. nca->type);
  640. return ret;
  641. }
  642. /* OEM Command handlers initialization */
  643. static struct ncsi_oem_gma_handler {
  644. unsigned int mfr_id;
  645. int (*handler)(struct ncsi_cmd_arg *nca);
  646. } ncsi_oem_gma_handlers[] = {
  647. { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
  648. { NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
  649. };
  650. static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
  651. {
  652. struct ncsi_oem_gma_handler *nch = NULL;
  653. int i;
  654. /* This function should only be called once, return if flag set */
  655. if (nca->ndp->gma_flag == 1)
  656. return -1;
  657. /* Find gma handler for given manufacturer id */
  658. for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
  659. if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
  660. if (ncsi_oem_gma_handlers[i].handler)
  661. nch = &ncsi_oem_gma_handlers[i];
  662. break;
  663. }
  664. }
  665. if (!nch) {
  666. netdev_err(nca->ndp->ndev.dev,
  667. "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
  668. mf_id);
  669. return -1;
  670. }
  671. /* Get the MAC address from the NCSI device */
  672. return nch->handler(nca);
  673. }
  674. #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
  675. /* Determine if a given channel from the channel_queue should be used for Tx */
  676. static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
  677. struct ncsi_channel *nc)
  678. {
  679. struct ncsi_channel_mode *ncm;
  680. struct ncsi_channel *channel;
  681. struct ncsi_package *np;
  682. /* Check if any other channel has Tx enabled; a channel may have already
  683. * been configured and removed from the channel queue.
  684. */
  685. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  686. if (!ndp->multi_package && np != nc->package)
  687. continue;
  688. NCSI_FOR_EACH_CHANNEL(np, channel) {
  689. ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
  690. if (ncm->enable)
  691. return false;
  692. }
  693. }
  694. /* This channel is the preferred channel and has link */
  695. list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
  696. np = channel->package;
  697. if (np->preferred_channel &&
  698. ncsi_channel_has_link(np->preferred_channel)) {
  699. return np->preferred_channel == nc;
  700. }
  701. }
  702. /* This channel has link */
  703. if (ncsi_channel_has_link(nc))
  704. return true;
  705. list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
  706. if (ncsi_channel_has_link(channel))
  707. return false;
  708. /* No other channel has link; default to this one */
  709. return true;
  710. }
  711. /* Change the active Tx channel in a multi-channel setup */
  712. int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
  713. struct ncsi_package *package,
  714. struct ncsi_channel *disable,
  715. struct ncsi_channel *enable)
  716. {
  717. struct ncsi_cmd_arg nca;
  718. struct ncsi_channel *nc;
  719. struct ncsi_package *np;
  720. int ret = 0;
  721. if (!package->multi_channel && !ndp->multi_package)
  722. netdev_warn(ndp->ndev.dev,
  723. "NCSI: Trying to update Tx channel in single-channel mode\n");
  724. nca.ndp = ndp;
  725. nca.req_flags = 0;
  726. /* Find current channel with Tx enabled */
  727. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  728. if (disable)
  729. break;
  730. if (!ndp->multi_package && np != package)
  731. continue;
  732. NCSI_FOR_EACH_CHANNEL(np, nc)
  733. if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
  734. disable = nc;
  735. break;
  736. }
  737. }
  738. /* Find a suitable channel for Tx */
  739. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  740. if (enable)
  741. break;
  742. if (!ndp->multi_package && np != package)
  743. continue;
  744. if (!(ndp->package_whitelist & (0x1 << np->id)))
  745. continue;
  746. if (np->preferred_channel &&
  747. ncsi_channel_has_link(np->preferred_channel)) {
  748. enable = np->preferred_channel;
  749. break;
  750. }
  751. NCSI_FOR_EACH_CHANNEL(np, nc) {
  752. if (!(np->channel_whitelist & 0x1 << nc->id))
  753. continue;
  754. if (nc->state != NCSI_CHANNEL_ACTIVE)
  755. continue;
  756. if (ncsi_channel_has_link(nc)) {
  757. enable = nc;
  758. break;
  759. }
  760. }
  761. }
  762. if (disable == enable)
  763. return -1;
  764. if (!enable)
  765. return -1;
  766. if (disable) {
  767. nca.channel = disable->id;
  768. nca.package = disable->package->id;
  769. nca.type = NCSI_PKT_CMD_DCNT;
  770. ret = ncsi_xmit_cmd(&nca);
  771. if (ret)
  772. netdev_err(ndp->ndev.dev,
  773. "Error %d sending DCNT\n",
  774. ret);
  775. }
  776. netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
  777. nca.channel = enable->id;
  778. nca.package = enable->package->id;
  779. nca.type = NCSI_PKT_CMD_ECNT;
  780. ret = ncsi_xmit_cmd(&nca);
  781. if (ret)
  782. netdev_err(ndp->ndev.dev,
  783. "Error %d sending ECNT\n",
  784. ret);
  785. return ret;
  786. }
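/* Configuration state machine for the active channel: select the
 * package (SP), clear the initial state (CIS), optionally fetch the
 * MAC address via an OEM command, program the VLAN and MAC filters,
 * enable broadcast filtering, enable the channel (and AENs when
 * supported), then issue a final Get Link Status before marking the
 * channel active and starting its monitor.
 */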
  787. static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
  788. {
  789. struct ncsi_package *np = ndp->active_package;
  790. struct ncsi_channel *nc = ndp->active_channel;
  791. struct ncsi_channel *hot_nc = NULL;
  792. struct ncsi_dev *nd = &ndp->ndev;
  793. struct net_device *dev = nd->dev;
  794. struct ncsi_cmd_arg nca;
  795. unsigned char index;
  796. unsigned long flags;
  797. int ret;
  798. nca.ndp = ndp;
  799. nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
  800. switch (nd->state) {
  801. case ncsi_dev_state_config:
  802. case ncsi_dev_state_config_sp:
  803. ndp->pending_req_num = 1;
  804. /* Select the specific package */
  805. nca.type = NCSI_PKT_CMD_SP;
  806. if (ndp->flags & NCSI_DEV_HWA)
  807. nca.bytes[0] = 0;
  808. else
  809. nca.bytes[0] = 1;
  810. nca.package = np->id;
  811. nca.channel = NCSI_RESERVED_CHANNEL;
  812. ret = ncsi_xmit_cmd(&nca);
  813. if (ret) {
  814. netdev_err(ndp->ndev.dev,
  815. "NCSI: Failed to transmit CMD_SP\n");
  816. goto error;
  817. }
  818. nd->state = ncsi_dev_state_config_cis;
  819. break;
  820. case ncsi_dev_state_config_cis:
  821. ndp->pending_req_num = 1;
  822. /* Clear initial state */
  823. nca.type = NCSI_PKT_CMD_CIS;
  824. nca.package = np->id;
  825. nca.channel = nc->id;
  826. ret = ncsi_xmit_cmd(&nca);
  827. if (ret) {
  828. netdev_err(ndp->ndev.dev,
  829. "NCSI: Failed to transmit CMD_CIS\n");
  830. goto error;
  831. }
  832. nd->state = ncsi_dev_state_config_oem_gma;
  833. break;
  834. case ncsi_dev_state_config_oem_gma:
  835. nd->state = ncsi_dev_state_config_clear_vids;
  836. ret = -1;
  837. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
  838. nca.type = NCSI_PKT_CMD_OEM;
  839. nca.package = np->id;
  840. nca.channel = nc->id;
  841. ndp->pending_req_num = 1;
  842. ret = ncsi_gma_handler(&nca, nc->version.mf_id);
  843. #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
  844. if (ret < 0)
  845. schedule_work(&ndp->work);
  846. break;
  847. case ncsi_dev_state_config_clear_vids:
  848. case ncsi_dev_state_config_svf:
  849. case ncsi_dev_state_config_ev:
  850. case ncsi_dev_state_config_sma:
  851. case ncsi_dev_state_config_ebf:
  852. case ncsi_dev_state_config_dgmf:
  853. case ncsi_dev_state_config_ecnt:
  854. case ncsi_dev_state_config_ec:
  855. case ncsi_dev_state_config_ae:
  856. case ncsi_dev_state_config_gls:
  857. ndp->pending_req_num = 1;
  858. nca.package = np->id;
  859. nca.channel = nc->id;
  860. /* Clear any active filters on the channel before setting */
  861. if (nd->state == ncsi_dev_state_config_clear_vids) {
  862. ret = clear_one_vid(ndp, nc, &nca);
  863. if (ret) {
  864. nd->state = ncsi_dev_state_config_svf;
  865. schedule_work(&ndp->work);
  866. break;
  867. }
  868. /* Repeat */
  869. nd->state = ncsi_dev_state_config_clear_vids;
  870. /* Add known VLAN tags to the filter */
  871. } else if (nd->state == ncsi_dev_state_config_svf) {
  872. ret = set_one_vid(ndp, nc, &nca);
  873. if (ret) {
  874. nd->state = ncsi_dev_state_config_ev;
  875. schedule_work(&ndp->work);
  876. break;
  877. }
  878. /* Repeat */
  879. nd->state = ncsi_dev_state_config_svf;
  880. /* Enable/Disable the VLAN filter */
  881. } else if (nd->state == ncsi_dev_state_config_ev) {
  882. if (list_empty(&ndp->vlan_vids)) {
  883. nca.type = NCSI_PKT_CMD_DV;
  884. } else {
  885. nca.type = NCSI_PKT_CMD_EV;
  886. nca.bytes[3] = NCSI_CAP_VLAN_NO;
  887. }
  888. nd->state = ncsi_dev_state_config_sma;
  889. } else if (nd->state == ncsi_dev_state_config_sma) {
  890. /* Use first entry in unicast filter table. Note that
  891. * the MAC filter table starts from entry 1 instead of
  892. * 0.
  893. */
  894. nca.type = NCSI_PKT_CMD_SMA;
  895. for (index = 0; index < 6; index++)
  896. nca.bytes[index] = dev->dev_addr[index];
  897. nca.bytes[6] = 0x1;
  898. nca.bytes[7] = 0x1;
  899. nd->state = ncsi_dev_state_config_ebf;
  900. } else if (nd->state == ncsi_dev_state_config_ebf) {
  901. nca.type = NCSI_PKT_CMD_EBF;
  902. nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
  903. /* If global multicast filtering is supported, disable it
  904. * so that all multicast packets are forwarded to the
  905. * management controller.
  906. */
  907. if (nc->caps[NCSI_CAP_GENERIC].cap &
  908. NCSI_CAP_GENERIC_MC)
  909. nd->state = ncsi_dev_state_config_dgmf;
  910. else if (ncsi_channel_is_tx(ndp, nc))
  911. nd->state = ncsi_dev_state_config_ecnt;
  912. else
  913. nd->state = ncsi_dev_state_config_ec;
  914. } else if (nd->state == ncsi_dev_state_config_dgmf) {
  915. nca.type = NCSI_PKT_CMD_DGMF;
  916. if (ncsi_channel_is_tx(ndp, nc))
  917. nd->state = ncsi_dev_state_config_ecnt;
  918. else
  919. nd->state = ncsi_dev_state_config_ec;
  920. } else if (nd->state == ncsi_dev_state_config_ecnt) {
  921. if (np->preferred_channel &&
  922. nc != np->preferred_channel)
  923. netdev_info(ndp->ndev.dev,
  924. "NCSI: Tx failed over to channel %u\n",
  925. nc->id);
  926. nca.type = NCSI_PKT_CMD_ECNT;
  927. nd->state = ncsi_dev_state_config_ec;
  928. } else if (nd->state == ncsi_dev_state_config_ec) {
  929. /* Enable AEN if it's supported */
  930. nca.type = NCSI_PKT_CMD_EC;
  931. nd->state = ncsi_dev_state_config_ae;
  932. if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
  933. nd->state = ncsi_dev_state_config_gls;
  934. } else if (nd->state == ncsi_dev_state_config_ae) {
  935. nca.type = NCSI_PKT_CMD_AE;
  936. nca.bytes[0] = 0;
  937. nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
  938. nd->state = ncsi_dev_state_config_gls;
  939. } else if (nd->state == ncsi_dev_state_config_gls) {
  940. nca.type = NCSI_PKT_CMD_GLS;
  941. nd->state = ncsi_dev_state_config_done;
  942. }
  943. ret = ncsi_xmit_cmd(&nca);
  944. if (ret) {
  945. netdev_err(ndp->ndev.dev,
  946. "NCSI: Failed to transmit CMD %x\n",
  947. nca.type);
  948. goto error;
  949. }
  950. break;
  951. case ncsi_dev_state_config_done:
  952. netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
  953. nc->id);
  954. spin_lock_irqsave(&nc->lock, flags);
  955. nc->state = NCSI_CHANNEL_ACTIVE;
  956. if (ndp->flags & NCSI_DEV_RESET) {
  957. /* A reset event happened during config, start it now */
  958. nc->reconfigure_needed = false;
  959. spin_unlock_irqrestore(&nc->lock, flags);
  960. ncsi_reset_dev(nd);
  961. break;
  962. }
  963. if (nc->reconfigure_needed) {
  964. /* This channel's configuration has been updated
  965. * part-way during the config state - start the
  966. * channel configuration over
  967. */
  968. nc->reconfigure_needed = false;
  969. nc->state = NCSI_CHANNEL_INACTIVE;
  970. spin_unlock_irqrestore(&nc->lock, flags);
  971. spin_lock_irqsave(&ndp->lock, flags);
  972. list_add_tail_rcu(&nc->link, &ndp->channel_queue);
  973. spin_unlock_irqrestore(&ndp->lock, flags);
  974. netdev_dbg(dev, "Dirty NCSI channel state reset\n");
  975. ncsi_process_next_channel(ndp);
  976. break;
  977. }
  978. if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
  979. hot_nc = nc;
  980. } else {
  981. hot_nc = NULL;
  982. netdev_dbg(ndp->ndev.dev,
  983. "NCSI: channel %u link down after config\n",
  984. nc->id);
  985. }
  986. spin_unlock_irqrestore(&nc->lock, flags);
  987. /* Update the hot channel */
  988. spin_lock_irqsave(&ndp->lock, flags);
  989. ndp->hot_channel = hot_nc;
  990. spin_unlock_irqrestore(&ndp->lock, flags);
  991. ncsi_start_channel_monitor(nc);
  992. ncsi_process_next_channel(ndp);
  993. break;
  994. default:
  995. netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
  996. nd->state);
  997. }
  998. return;
  999. error:
  1000. ncsi_report_link(ndp, true);
  1001. }
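/* Pick the next channel(s) to configure. Whitelisted packages are
 * scanned for an inactive channel with link up, preferring the
 * previous "hot" channel; with multi_package/multi_channel every
 * eligible channel is queued so AENs get enabled on all of them. If
 * no channel has link, fall back to the first inactive one found.
 */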
  1002. static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
  1003. {
  1004. struct ncsi_channel *nc, *found, *hot_nc;
  1005. struct ncsi_channel_mode *ncm;
  1006. unsigned long flags, cflags;
  1007. struct ncsi_package *np;
  1008. bool with_link;
  1009. spin_lock_irqsave(&ndp->lock, flags);
  1010. hot_nc = ndp->hot_channel;
  1011. spin_unlock_irqrestore(&ndp->lock, flags);
  1012. /* By default the search stops once an inactive channel with link
  1013. * up is found, unless a preferred channel is set.
  1014. * If multi_package or multi_channel is configured, all channels in
  1015. * the whitelist are added to the channel queue.
  1016. */
  1017. found = NULL;
  1018. with_link = false;
  1019. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1020. if (!(ndp->package_whitelist & (0x1 << np->id)))
  1021. continue;
  1022. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1023. if (!(np->channel_whitelist & (0x1 << nc->id)))
  1024. continue;
  1025. spin_lock_irqsave(&nc->lock, cflags);
  1026. if (!list_empty(&nc->link) ||
  1027. nc->state != NCSI_CHANNEL_INACTIVE) {
  1028. spin_unlock_irqrestore(&nc->lock, cflags);
  1029. continue;
  1030. }
  1031. if (!found)
  1032. found = nc;
  1033. if (nc == hot_nc)
  1034. found = nc;
  1035. ncm = &nc->modes[NCSI_MODE_LINK];
  1036. if (ncm->data[2] & 0x1) {
  1037. found = nc;
  1038. with_link = true;
  1039. }
  1040. /* If multi_channel is enabled configure all valid
  1041. * channels whether or not they currently have link
  1042. * so they will have AENs enabled.
  1043. */
  1044. if (with_link || np->multi_channel) {
  1045. spin_lock_irqsave(&ndp->lock, flags);
  1046. list_add_tail_rcu(&nc->link,
  1047. &ndp->channel_queue);
  1048. spin_unlock_irqrestore(&ndp->lock, flags);
  1049. netdev_dbg(ndp->ndev.dev,
  1050. "NCSI: Channel %u added to queue (link %s)\n",
  1051. nc->id,
  1052. ncm->data[2] & 0x1 ? "up" : "down");
  1053. }
  1054. spin_unlock_irqrestore(&nc->lock, cflags);
  1055. if (with_link && !np->multi_channel)
  1056. break;
  1057. }
  1058. if (with_link && !ndp->multi_package)
  1059. break;
  1060. }
  1061. if (list_empty(&ndp->channel_queue) && found) {
  1062. netdev_info(ndp->ndev.dev,
  1063. "NCSI: No channel with link found, configuring channel %u\n",
  1064. found->id);
  1065. spin_lock_irqsave(&ndp->lock, flags);
  1066. list_add_tail_rcu(&found->link, &ndp->channel_queue);
  1067. spin_unlock_irqrestore(&ndp->lock, flags);
  1068. } else if (!found) {
  1069. netdev_warn(ndp->ndev.dev,
  1070. "NCSI: No channel found to configure!\n");
  1071. ncsi_report_link(ndp, true);
  1072. return -ENODEV;
  1073. }
  1074. return ncsi_process_next_channel(ndp);
  1075. }
  1076. static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
  1077. {
  1078. struct ncsi_package *np;
  1079. struct ncsi_channel *nc;
  1080. unsigned int cap;
  1081. bool has_channel = false;
  1082. /* Hardware arbitration is disabled if any channel doesn't
  1083. * explicitly support it.
  1084. */
  1085. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1086. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1087. has_channel = true;
  1088. cap = nc->caps[NCSI_CAP_GENERIC].cap;
  1089. if (!(cap & NCSI_CAP_GENERIC_HWA) ||
  1090. (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
  1091. NCSI_CAP_GENERIC_HWA_SUPPORT) {
  1092. ndp->flags &= ~NCSI_DEV_HWA;
  1093. return false;
  1094. }
  1095. }
  1096. }
  1097. if (has_channel) {
  1098. ndp->flags |= NCSI_DEV_HWA;
  1099. return true;
  1100. }
  1101. ndp->flags &= ~NCSI_DEV_HWA;
  1102. return false;
  1103. }
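/* Topology enumeration: deselect all packages, then select each
 * package ID in turn, clear the initial state of its channels and
 * query version (GVI), capabilities (GC) and link status (GLS)
 * before deselecting it again. Once all eight package IDs have been
 * probed, HW arbitration support is checked and an active channel
 * is chosen.
 */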
  1104. static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
  1105. {
  1106. struct ncsi_dev *nd = &ndp->ndev;
  1107. struct ncsi_package *np;
  1108. struct ncsi_channel *nc;
  1109. struct ncsi_cmd_arg nca;
  1110. unsigned char index;
  1111. int ret;
  1112. nca.ndp = ndp;
  1113. nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
  1114. switch (nd->state) {
  1115. case ncsi_dev_state_probe:
  1116. nd->state = ncsi_dev_state_probe_deselect;
  1117. fallthrough;
  1118. case ncsi_dev_state_probe_deselect:
  1119. ndp->pending_req_num = 8;
  1120. /* Deselect all possible packages */
  1121. nca.type = NCSI_PKT_CMD_DP;
  1122. nca.channel = NCSI_RESERVED_CHANNEL;
  1123. for (index = 0; index < 8; index++) {
  1124. nca.package = index;
  1125. ret = ncsi_xmit_cmd(&nca);
  1126. if (ret)
  1127. goto error;
  1128. }
  1129. nd->state = ncsi_dev_state_probe_package;
  1130. break;
  1131. case ncsi_dev_state_probe_package:
  1132. ndp->pending_req_num = 1;
  1133. nca.type = NCSI_PKT_CMD_SP;
  1134. nca.bytes[0] = 1;
  1135. nca.package = ndp->package_probe_id;
  1136. nca.channel = NCSI_RESERVED_CHANNEL;
  1137. ret = ncsi_xmit_cmd(&nca);
  1138. if (ret)
  1139. goto error;
  1140. nd->state = ncsi_dev_state_probe_channel;
  1141. break;
  1142. case ncsi_dev_state_probe_channel:
  1143. ndp->active_package = ncsi_find_package(ndp,
  1144. ndp->package_probe_id);
  1145. if (!ndp->active_package) {
  1146. /* No response */
  1147. nd->state = ncsi_dev_state_probe_dp;
  1148. schedule_work(&ndp->work);
  1149. break;
  1150. }
  1151. nd->state = ncsi_dev_state_probe_cis;
  1152. if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
  1153. ndp->mlx_multi_host)
  1154. nd->state = ncsi_dev_state_probe_mlx_gma;
  1155. schedule_work(&ndp->work);
  1156. break;
  1157. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
  1158. case ncsi_dev_state_probe_mlx_gma:
  1159. ndp->pending_req_num = 1;
  1160. nca.type = NCSI_PKT_CMD_OEM;
  1161. nca.package = ndp->active_package->id;
  1162. nca.channel = 0;
  1163. ret = ncsi_oem_gma_handler_mlx(&nca);
  1164. if (ret)
  1165. goto error;
  1166. nd->state = ncsi_dev_state_probe_mlx_smaf;
  1167. break;
  1168. case ncsi_dev_state_probe_mlx_smaf:
  1169. ndp->pending_req_num = 1;
  1170. nca.type = NCSI_PKT_CMD_OEM;
  1171. nca.package = ndp->active_package->id;
  1172. nca.channel = 0;
  1173. ret = ncsi_oem_smaf_mlx(&nca);
  1174. if (ret)
  1175. goto error;
  1176. nd->state = ncsi_dev_state_probe_cis;
  1177. break;
  1178. #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
  1179. case ncsi_dev_state_probe_cis:
  1180. ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
  1181. /* Clear initial state */
  1182. nca.type = NCSI_PKT_CMD_CIS;
  1183. nca.package = ndp->active_package->id;
  1184. for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
  1185. nca.channel = index;
  1186. ret = ncsi_xmit_cmd(&nca);
  1187. if (ret)
  1188. goto error;
  1189. }
  1190. nd->state = ncsi_dev_state_probe_gvi;
  1191. break;
  1192. case ncsi_dev_state_probe_gvi:
  1193. case ncsi_dev_state_probe_gc:
  1194. case ncsi_dev_state_probe_gls:
  1195. np = ndp->active_package;
  1196. ndp->pending_req_num = np->channel_num;
  1197. /* Retrieve version, capability or link status */
  1198. if (nd->state == ncsi_dev_state_probe_gvi)
  1199. nca.type = NCSI_PKT_CMD_GVI;
  1200. else if (nd->state == ncsi_dev_state_probe_gc)
  1201. nca.type = NCSI_PKT_CMD_GC;
  1202. else
  1203. nca.type = NCSI_PKT_CMD_GLS;
  1204. nca.package = np->id;
  1205. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1206. nca.channel = nc->id;
  1207. ret = ncsi_xmit_cmd(&nca);
  1208. if (ret)
  1209. goto error;
  1210. }
  1211. if (nd->state == ncsi_dev_state_probe_gvi)
  1212. nd->state = ncsi_dev_state_probe_gc;
  1213. else if (nd->state == ncsi_dev_state_probe_gc)
  1214. nd->state = ncsi_dev_state_probe_gls;
  1215. else
  1216. nd->state = ncsi_dev_state_probe_dp;
  1217. break;
  1218. case ncsi_dev_state_probe_dp:
  1219. ndp->pending_req_num = 1;
  1220. /* Deselect the current package */
  1221. nca.type = NCSI_PKT_CMD_DP;
  1222. nca.package = ndp->package_probe_id;
  1223. nca.channel = NCSI_RESERVED_CHANNEL;
  1224. ret = ncsi_xmit_cmd(&nca);
  1225. if (ret)
  1226. goto error;
  1227. /* Probe next package */
  1228. ndp->package_probe_id++;
  1229. if (ndp->package_probe_id >= 8) {
  1230. /* Probe finished */
  1231. ndp->flags |= NCSI_DEV_PROBED;
  1232. break;
  1233. }
  1234. nd->state = ncsi_dev_state_probe_package;
  1235. ndp->active_package = NULL;
  1236. break;
  1237. default:
  1238. netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
  1239. nd->state);
  1240. }
  1241. if (ndp->flags & NCSI_DEV_PROBED) {
  1242. /* Check if all packages have HWA support */
  1243. ncsi_check_hwa(ndp);
  1244. ncsi_choose_active_channel(ndp);
  1245. }
  1246. return;
  1247. error:
  1248. netdev_err(ndp->ndev.dev,
  1249. "NCSI: Failed to transmit cmd 0x%x during probe\n",
  1250. nca.type);
  1251. ncsi_report_link(ndp, true);
  1252. }
  1253. static void ncsi_dev_work(struct work_struct *work)
  1254. {
  1255. struct ncsi_dev_priv *ndp = container_of(work,
  1256. struct ncsi_dev_priv, work);
  1257. struct ncsi_dev *nd = &ndp->ndev;
  1258. switch (nd->state & ncsi_dev_state_major) {
  1259. case ncsi_dev_state_probe:
  1260. ncsi_probe_channel(ndp);
  1261. break;
  1262. case ncsi_dev_state_suspend:
  1263. ncsi_suspend_channel(ndp);
  1264. break;
  1265. case ncsi_dev_state_config:
  1266. ncsi_configure_channel(ndp);
  1267. break;
  1268. default:
  1269. netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
  1270. nd->state);
  1271. }
  1272. }
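/* Pop the next channel off the channel queue and run the matching
 * state machine: configuration for an inactive channel, suspend for
 * an active one. With an empty queue, either redo channel selection
 * (when a reshuffle is pending) or report the final link state.
 */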
  1273. int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
  1274. {
  1275. struct ncsi_channel *nc;
  1276. int old_state;
  1277. unsigned long flags;
  1278. spin_lock_irqsave(&ndp->lock, flags);
  1279. nc = list_first_or_null_rcu(&ndp->channel_queue,
  1280. struct ncsi_channel, link);
  1281. if (!nc) {
  1282. spin_unlock_irqrestore(&ndp->lock, flags);
  1283. goto out;
  1284. }
  1285. list_del_init(&nc->link);
  1286. spin_unlock_irqrestore(&ndp->lock, flags);
  1287. spin_lock_irqsave(&nc->lock, flags);
  1288. old_state = nc->state;
  1289. nc->state = NCSI_CHANNEL_INVISIBLE;
  1290. spin_unlock_irqrestore(&nc->lock, flags);
  1291. ndp->active_channel = nc;
  1292. ndp->active_package = nc->package;
  1293. switch (old_state) {
  1294. case NCSI_CHANNEL_INACTIVE:
  1295. ndp->ndev.state = ncsi_dev_state_config;
  1296. netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
  1297. nc->id);
  1298. ncsi_configure_channel(ndp);
  1299. break;
  1300. case NCSI_CHANNEL_ACTIVE:
  1301. ndp->ndev.state = ncsi_dev_state_suspend;
  1302. netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
  1303. nc->id);
  1304. ncsi_suspend_channel(ndp);
  1305. break;
  1306. default:
  1307. netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
  1308. old_state, nc->package->id, nc->id);
  1309. ncsi_report_link(ndp, false);
  1310. return -EINVAL;
  1311. }
  1312. return 0;
  1313. out:
  1314. ndp->active_channel = NULL;
  1315. ndp->active_package = NULL;
  1316. if (ndp->flags & NCSI_DEV_RESHUFFLE) {
  1317. ndp->flags &= ~NCSI_DEV_RESHUFFLE;
  1318. return ncsi_choose_active_channel(ndp);
  1319. }
  1320. ncsi_report_link(ndp, false);
  1321. return -ENODEV;
  1322. }
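/* Re-queue every active channel for reconfiguration, e.g. after the
 * VLAN filter list has changed. Channels that are busy (being
 * configured or already queued) are only marked as needing
 * reconfiguration. Returns the number of channels queued.
 */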
  1323. static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
  1324. {
  1325. struct ncsi_dev *nd = &ndp->ndev;
  1326. struct ncsi_channel *nc;
  1327. struct ncsi_package *np;
  1328. unsigned long flags;
  1329. unsigned int n = 0;
  1330. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1331. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1332. spin_lock_irqsave(&nc->lock, flags);
  1333. /* Channels may be busy, so mark them dirty instead of
  1334. * kicking them if:
  1335. * a) not ACTIVE (configured)
  1336. * b) in the channel_queue (to be configured)
  1337. * c) their ndev is in the config state
  1338. */
  1339. if (nc->state != NCSI_CHANNEL_ACTIVE) {
  1340. if ((ndp->ndev.state & 0xff00) ==
  1341. ncsi_dev_state_config ||
  1342. !list_empty(&nc->link)) {
  1343. netdev_dbg(nd->dev,
  1344. "NCSI: channel %p marked dirty\n",
  1345. nc);
  1346. nc->reconfigure_needed = true;
  1347. }
  1348. spin_unlock_irqrestore(&nc->lock, flags);
  1349. continue;
  1350. }
  1351. spin_unlock_irqrestore(&nc->lock, flags);
  1352. ncsi_stop_channel_monitor(nc);
  1353. spin_lock_irqsave(&nc->lock, flags);
  1354. nc->state = NCSI_CHANNEL_INACTIVE;
  1355. spin_unlock_irqrestore(&nc->lock, flags);
  1356. spin_lock_irqsave(&ndp->lock, flags);
  1357. list_add_tail_rcu(&nc->link, &ndp->channel_queue);
  1358. spin_unlock_irqrestore(&ndp->lock, flags);
  1359. netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
  1360. n++;
  1361. }
  1362. }
  1363. return n;
  1364. }
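/* Register a VLAN ID with NCSI: add it to the internal list and kick
 * the channels so the new filter gets programmed into the hardware.
 */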
  1365. int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
  1366. {
  1367. struct ncsi_dev_priv *ndp;
  1368. unsigned int n_vids = 0;
  1369. struct vlan_vid *vlan;
  1370. struct ncsi_dev *nd;
  1371. bool found = false;
  1372. if (vid == 0)
  1373. return 0;
  1374. nd = ncsi_find_dev(dev);
  1375. if (!nd) {
  1376. netdev_warn(dev, "NCSI: No net_device?\n");
  1377. return 0;
  1378. }
  1379. ndp = TO_NCSI_DEV_PRIV(nd);
  1380. /* Add the VLAN id to our internal list */
  1381. list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
  1382. n_vids++;
  1383. if (vlan->vid == vid) {
  1384. netdev_dbg(dev, "NCSI: vid %u already registered\n",
  1385. vid);
  1386. return 0;
  1387. }
  1388. }
  1389. if (n_vids >= NCSI_MAX_VLAN_VIDS) {
  1390. netdev_warn(dev,
  1391. "tried to add vlan id %u but NCSI max already registered (%u)\n",
  1392. vid, NCSI_MAX_VLAN_VIDS);
  1393. return -ENOSPC;
  1394. }
  1395. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  1396. if (!vlan)
  1397. return -ENOMEM;
  1398. vlan->proto = proto;
  1399. vlan->vid = vid;
  1400. list_add_rcu(&vlan->list, &ndp->vlan_vids);
  1401. netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
  1402. found = ncsi_kick_channels(ndp) != 0;
  1403. return found ? ncsi_process_next_channel(ndp) : 0;
  1404. }
  1405. EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
  1406. int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
  1407. {
  1408. struct vlan_vid *vlan, *tmp;
  1409. struct ncsi_dev_priv *ndp;
  1410. struct ncsi_dev *nd;
  1411. bool found = false;
  1412. if (vid == 0)
  1413. return 0;
  1414. nd = ncsi_find_dev(dev);
  1415. if (!nd) {
  1416. netdev_warn(dev, "NCSI: no net_device?\n");
  1417. return 0;
  1418. }
  1419. ndp = TO_NCSI_DEV_PRIV(nd);
  1420. /* Remove the VLAN id from our internal list */
  1421. list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
  1422. if (vlan->vid == vid) {
  1423. netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
  1424. list_del_rcu(&vlan->list);
  1425. found = true;
  1426. kfree(vlan);
  1427. }
  1428. if (!found) {
  1429. netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
  1430. return -EINVAL;
  1431. }
  1432. found = ncsi_kick_channels(ndp) != 0;
  1433. return found ? ncsi_process_next_channel(ndp) : 0;
  1434. }
  1435. EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
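/* Allocate and register an NCSI device for a network interface:
 * initialise the request table, work item and package list, register
 * the NC-SI packet type handler and add the device to the global
 * device list.
 */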
  1436. struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
  1437. void (*handler)(struct ncsi_dev *ndev))
  1438. {
  1439. struct ncsi_dev_priv *ndp;
  1440. struct ncsi_dev *nd;
  1441. struct platform_device *pdev;
  1442. struct device_node *np;
  1443. unsigned long flags;
  1444. int i;
  1445. /* Check if the device has been registered or not */
  1446. nd = ncsi_find_dev(dev);
  1447. if (nd)
  1448. return nd;
  1449. /* Create NCSI device */
  1450. ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
  1451. if (!ndp)
  1452. return NULL;
  1453. nd = &ndp->ndev;
  1454. nd->state = ncsi_dev_state_registered;
  1455. nd->dev = dev;
  1456. nd->handler = handler;
  1457. ndp->pending_req_num = 0;
  1458. INIT_LIST_HEAD(&ndp->channel_queue);
  1459. INIT_LIST_HEAD(&ndp->vlan_vids);
  1460. INIT_WORK(&ndp->work, ncsi_dev_work);
  1461. ndp->package_whitelist = UINT_MAX;
  1462. /* Initialize private NCSI device */
  1463. spin_lock_init(&ndp->lock);
  1464. INIT_LIST_HEAD(&ndp->packages);
  1465. ndp->request_id = NCSI_REQ_START_IDX;
  1466. for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
  1467. ndp->requests[i].id = i;
  1468. ndp->requests[i].ndp = ndp;
  1469. timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
  1470. }
  1471. spin_lock_irqsave(&ncsi_dev_lock, flags);
  1472. list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
  1473. spin_unlock_irqrestore(&ncsi_dev_lock, flags);
  1474. /* Register NCSI packet Rx handler */
  1475. ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
  1476. ndp->ptype.func = ncsi_rcv_rsp;
  1477. ndp->ptype.dev = dev;
  1478. dev_add_pack(&ndp->ptype);
  1479. pdev = to_platform_device(dev->dev.parent);
  1480. if (pdev) {
  1481. np = pdev->dev.of_node;
  1482. if (np && of_get_property(np, "mlx,multi-host", NULL))
  1483. ndp->mlx_multi_host = true;
  1484. }
  1485. return nd;
  1486. }
  1487. EXPORT_SYMBOL_GPL(ncsi_register_dev);
  1488. int ncsi_start_dev(struct ncsi_dev *nd)
  1489. {
  1490. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1491. if (nd->state != ncsi_dev_state_registered &&
  1492. nd->state != ncsi_dev_state_functional)
  1493. return -ENOTTY;
  1494. if (!(ndp->flags & NCSI_DEV_PROBED)) {
  1495. ndp->package_probe_id = 0;
  1496. nd->state = ncsi_dev_state_probe;
  1497. schedule_work(&ndp->work);
  1498. return 0;
  1499. }
  1500. return ncsi_reset_dev(nd);
  1501. }
  1502. EXPORT_SYMBOL_GPL(ncsi_start_dev);
  1503. void ncsi_stop_dev(struct ncsi_dev *nd)
  1504. {
  1505. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1506. struct ncsi_package *np;
  1507. struct ncsi_channel *nc;
  1508. bool chained;
  1509. int old_state;
  1510. unsigned long flags;
  1511. /* Stop the channel monitor on any active channels. Don't reset the
  1512. * channel state so we know which were active when ncsi_start_dev()
  1513. * is next called.
  1514. */
  1515. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1516. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1517. ncsi_stop_channel_monitor(nc);
  1518. spin_lock_irqsave(&nc->lock, flags);
  1519. chained = !list_empty(&nc->link);
  1520. old_state = nc->state;
  1521. spin_unlock_irqrestore(&nc->lock, flags);
  1522. WARN_ON_ONCE(chained ||
  1523. old_state == NCSI_CHANNEL_INVISIBLE);
  1524. }
  1525. }
  1526. netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
  1527. ncsi_report_link(ndp, true);
  1528. }
  1529. EXPORT_SYMBOL_GPL(ncsi_stop_dev);
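/* Reset the NCSI device: suspend whichever channel is currently
 * active (deferring if a suspend/config operation is already in
 * flight) and then redo channel selection from scratch.
 */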
  1530. int ncsi_reset_dev(struct ncsi_dev *nd)
  1531. {
  1532. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1533. struct ncsi_channel *nc, *active, *tmp;
  1534. struct ncsi_package *np;
  1535. unsigned long flags;
  1536. spin_lock_irqsave(&ndp->lock, flags);
  1537. if (!(ndp->flags & NCSI_DEV_RESET)) {
  1538. /* Haven't been called yet, check states */
  1539. switch (nd->state & ncsi_dev_state_major) {
  1540. case ncsi_dev_state_registered:
  1541. case ncsi_dev_state_probe:
  1542. /* Not even probed yet - do nothing */
  1543. spin_unlock_irqrestore(&ndp->lock, flags);
  1544. return 0;
  1545. case ncsi_dev_state_suspend:
  1546. case ncsi_dev_state_config:
  1547. /* Wait for the channel to finish its suspend/config
  1548. * operation; once it finishes it will check for
  1549. * NCSI_DEV_RESET and reset the state.
  1550. */
  1551. ndp->flags |= NCSI_DEV_RESET;
  1552. spin_unlock_irqrestore(&ndp->lock, flags);
  1553. return 0;
  1554. }
  1555. } else {
  1556. switch (nd->state) {
  1557. case ncsi_dev_state_suspend_done:
  1558. case ncsi_dev_state_config_done:
  1559. case ncsi_dev_state_functional:
  1560. /* Ok */
  1561. break;
  1562. default:
  1563. /* A reset operation is already in progress */
  1564. spin_unlock_irqrestore(&ndp->lock, flags);
  1565. return 0;
  1566. }
  1567. }
  1568. if (!list_empty(&ndp->channel_queue)) {
  1569. /* Clear any channel queue we may have interrupted */
  1570. list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
  1571. list_del_init(&nc->link);
  1572. }
  1573. spin_unlock_irqrestore(&ndp->lock, flags);
  1574. active = NULL;
  1575. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1576. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1577. spin_lock_irqsave(&nc->lock, flags);
  1578. if (nc->state == NCSI_CHANNEL_ACTIVE) {
  1579. active = nc;
  1580. nc->state = NCSI_CHANNEL_INVISIBLE;
  1581. spin_unlock_irqrestore(&nc->lock, flags);
  1582. ncsi_stop_channel_monitor(nc);
  1583. break;
  1584. }
  1585. spin_unlock_irqrestore(&nc->lock, flags);
  1586. }
  1587. if (active)
  1588. break;
  1589. }
  1590. if (!active) {
  1591. /* Done */
  1592. spin_lock_irqsave(&ndp->lock, flags);
  1593. ndp->flags &= ~NCSI_DEV_RESET;
  1594. spin_unlock_irqrestore(&ndp->lock, flags);
  1595. return ncsi_choose_active_channel(ndp);
  1596. }
  1597. spin_lock_irqsave(&ndp->lock, flags);
  1598. ndp->flags |= NCSI_DEV_RESET;
  1599. ndp->active_channel = active;
  1600. ndp->active_package = active->package;
  1601. spin_unlock_irqrestore(&ndp->lock, flags);
  1602. nd->state = ncsi_dev_state_suspend;
  1603. schedule_work(&ndp->work);
  1604. return 0;
  1605. }
  1606. void ncsi_unregister_dev(struct ncsi_dev *nd)
  1607. {
  1608. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1609. struct ncsi_package *np, *tmp;
  1610. unsigned long flags;
  1611. dev_remove_pack(&ndp->ptype);
  1612. list_for_each_entry_safe(np, tmp, &ndp->packages, node)
  1613. ncsi_remove_package(np);
  1614. spin_lock_irqsave(&ncsi_dev_lock, flags);
  1615. list_del_rcu(&ndp->node);
  1616. spin_unlock_irqrestore(&ncsi_dev_lock, flags);
  1617. kfree(ndp);
  1618. }
  1619. EXPORT_SYMBOL_GPL(ncsi_unregister_dev);