rndis_filter.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2009, Microsoft Corporation.
  4. *
  5. * Authors:
  6. * Haiyang Zhang <haiyangz@microsoft.com>
  7. * Hank Janssen <hjanssen@microsoft.com>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/sched.h>
  11. #include <linux/wait.h>
  12. #include <linux/highmem.h>
  13. #include <linux/slab.h>
  14. #include <linux/io.h>
  15. #include <linux/if_ether.h>
  16. #include <linux/netdevice.h>
  17. #include <linux/if_vlan.h>
  18. #include <linux/nls.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/rtnetlink.h>
  21. #include <linux/ucs2_string.h>
  22. #include "hyperv_net.h"
  23. #include "netvsc_trace.h"
  24. static void rndis_set_multicast(struct work_struct *w);
  25. #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
  26. struct rndis_request {
  27. struct list_head list_ent;
  28. struct completion wait_event;
  29. struct rndis_message response_msg;
  30. /*
  31. * The buffer for extended info after the RNDIS response message. It's
  32. * referenced based on the data offset in the RNDIS message. Its size
  33. * is enough for current needs, and should be sufficient for the near
  34. * future.
  35. */
  36. u8 response_ext[RNDIS_EXT_LEN];
  37. /* Simplify allocation by having a netvsc packet inline */
  38. struct hv_netvsc_packet pkt;
  39. struct rndis_message request_msg;
  40. /*
  41. * The buffer for the extended info after the RNDIS request message.
  42. * It is referenced and sized in a similar way as response_ext.
  43. */
  44. u8 request_ext[RNDIS_EXT_LEN];
  45. };
  46. static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
  47. 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
  48. 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
  49. 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
  50. 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
  51. 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
  52. };
  53. static struct rndis_device *get_rndis_device(void)
  54. {
  55. struct rndis_device *device;
  56. device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
  57. if (!device)
  58. return NULL;
  59. spin_lock_init(&device->request_lock);
  60. INIT_LIST_HEAD(&device->req_list);
  61. INIT_WORK(&device->mcast_work, rndis_set_multicast);
  62. device->state = RNDIS_DEV_UNINITIALIZED;
  63. return device;
  64. }
  65. static struct rndis_request *get_rndis_request(struct rndis_device *dev,
  66. u32 msg_type,
  67. u32 msg_len)
  68. {
  69. struct rndis_request *request;
  70. struct rndis_message *rndis_msg;
  71. struct rndis_set_request *set;
  72. unsigned long flags;
  73. request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
  74. if (!request)
  75. return NULL;
  76. init_completion(&request->wait_event);
  77. rndis_msg = &request->request_msg;
  78. rndis_msg->ndis_msg_type = msg_type;
  79. rndis_msg->msg_len = msg_len;
  80. request->pkt.q_idx = 0;
  81. /*
  82. * Set the request id. This field is always right after the rndis header
  83. * for request/response packet types, so we just use the SetRequest as a
  84. * template.
  85. */
  86. set = &rndis_msg->msg.set_req;
  87. set->req_id = atomic_inc_return(&dev->new_req_id);
  88. /* Add to the request list */
  89. spin_lock_irqsave(&dev->request_lock, flags);
  90. list_add_tail(&request->list_ent, &dev->req_list);
  91. spin_unlock_irqrestore(&dev->request_lock, flags);
  92. return request;
  93. }
  94. static void put_rndis_request(struct rndis_device *dev,
  95. struct rndis_request *req)
  96. {
  97. unsigned long flags;
  98. spin_lock_irqsave(&dev->request_lock, flags);
  99. list_del(&req->list_ent);
  100. spin_unlock_irqrestore(&dev->request_lock, flags);
  101. kfree(req);
  102. }
  103. static void dump_rndis_message(struct net_device *netdev,
  104. const struct rndis_message *rndis_msg)
  105. {
  106. switch (rndis_msg->ndis_msg_type) {
  107. case RNDIS_MSG_PACKET:
  108. netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
  109. "data offset %u data len %u, # oob %u, "
  110. "oob offset %u, oob len %u, pkt offset %u, "
  111. "pkt len %u\n",
  112. rndis_msg->msg_len,
  113. rndis_msg->msg.pkt.data_offset,
  114. rndis_msg->msg.pkt.data_len,
  115. rndis_msg->msg.pkt.num_oob_data_elements,
  116. rndis_msg->msg.pkt.oob_data_offset,
  117. rndis_msg->msg.pkt.oob_data_len,
  118. rndis_msg->msg.pkt.per_pkt_info_offset,
  119. rndis_msg->msg.pkt.per_pkt_info_len);
  120. break;
  121. case RNDIS_MSG_INIT_C:
  122. netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
  123. "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
  124. "device flags %d, max xfer size 0x%x, max pkts %u, "
  125. "pkt aligned %u)\n",
  126. rndis_msg->msg_len,
  127. rndis_msg->msg.init_complete.req_id,
  128. rndis_msg->msg.init_complete.status,
  129. rndis_msg->msg.init_complete.major_ver,
  130. rndis_msg->msg.init_complete.minor_ver,
  131. rndis_msg->msg.init_complete.dev_flags,
  132. rndis_msg->msg.init_complete.max_xfer_size,
  133. rndis_msg->msg.init_complete.
  134. max_pkt_per_msg,
  135. rndis_msg->msg.init_complete.
  136. pkt_alignment_factor);
  137. break;
  138. case RNDIS_MSG_QUERY_C:
  139. netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
  140. "(len %u, id 0x%x, status 0x%x, buf len %u, "
  141. "buf offset %u)\n",
  142. rndis_msg->msg_len,
  143. rndis_msg->msg.query_complete.req_id,
  144. rndis_msg->msg.query_complete.status,
  145. rndis_msg->msg.query_complete.
  146. info_buflen,
  147. rndis_msg->msg.query_complete.
  148. info_buf_offset);
  149. break;
  150. case RNDIS_MSG_SET_C:
  151. netdev_dbg(netdev,
  152. "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
  153. rndis_msg->msg_len,
  154. rndis_msg->msg.set_complete.req_id,
  155. rndis_msg->msg.set_complete.status);
  156. break;
  157. case RNDIS_MSG_INDICATE:
  158. netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
  159. "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
  160. rndis_msg->msg_len,
  161. rndis_msg->msg.indicate_status.status,
  162. rndis_msg->msg.indicate_status.status_buflen,
  163. rndis_msg->msg.indicate_status.status_buf_offset);
  164. break;
  165. default:
  166. netdev_dbg(netdev, "0x%x (len %u)\n",
  167. rndis_msg->ndis_msg_type,
  168. rndis_msg->msg_len);
  169. break;
  170. }
  171. }
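/* Send an RNDIS control request to the host. The request message is
 * described by one hv_page_buffer, or two when it straddles a Hyper-V
 * page boundary, and is passed to netvsc_send() under rcu_read_lock_bh().
 */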
  172. static int rndis_filter_send_request(struct rndis_device *dev,
  173. struct rndis_request *req)
  174. {
  175. struct hv_netvsc_packet *packet;
  176. struct hv_page_buffer page_buf[2];
  177. struct hv_page_buffer *pb = page_buf;
  178. int ret;
  179. /* Setup the packet to send it */
  180. packet = &req->pkt;
  181. packet->total_data_buflen = req->request_msg.msg_len;
  182. packet->page_buf_cnt = 1;
  183. pb[0].pfn = virt_to_phys(&req->request_msg) >>
  184. HV_HYP_PAGE_SHIFT;
  185. pb[0].len = req->request_msg.msg_len;
  186. pb[0].offset = offset_in_hvpage(&req->request_msg);
  187. /* Add one page_buf when the request_msg crosses a page boundary */
  188. if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
  189. packet->page_buf_cnt++;
  190. pb[0].len = HV_HYP_PAGE_SIZE -
  191. pb[0].offset;
  192. pb[1].pfn = virt_to_phys((void *)&req->request_msg
  193. + pb[0].len) >> HV_HYP_PAGE_SHIFT;
  194. pb[1].offset = 0;
  195. pb[1].len = req->request_msg.msg_len -
  196. pb[0].len;
  197. }
  198. trace_rndis_send(dev->ndev, 0, &req->request_msg);
  199. rcu_read_lock_bh();
  200. ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
  201. rcu_read_unlock_bh();
  202. return ret;
  203. }
  204. static void rndis_set_link_state(struct rndis_device *rdev,
  205. struct rndis_request *request)
  206. {
  207. u32 link_status;
  208. struct rndis_query_complete *query_complete;
  209. query_complete = &request->response_msg.msg.query_complete;
  210. if (query_complete->status == RNDIS_STATUS_SUCCESS &&
  211. query_complete->info_buflen == sizeof(u32)) {
  212. memcpy(&link_status, (void *)((unsigned long)query_complete +
  213. query_complete->info_buf_offset), sizeof(u32));
  214. rdev->link_state = link_status != 0;
  215. }
  216. }
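/* Handle an RNDIS completion message from the host: match it to a pending
 * request by request id, copy the response (or mark a buffer overflow if it
 * is too large), and complete the waiter.
 */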
  217. static void rndis_filter_receive_response(struct net_device *ndev,
  218. struct netvsc_device *nvdev,
  219. const struct rndis_message *resp)
  220. {
  221. struct rndis_device *dev = nvdev->extension;
  222. struct rndis_request *request = NULL;
  223. bool found = false;
  224. unsigned long flags;
  225. /* This should never happen; it means a control message
  226. * response was received after the device was removed.
  227. */
  228. if (dev->state == RNDIS_DEV_UNINITIALIZED) {
  229. netdev_err(ndev,
  230. "got rndis message uninitialized\n");
  231. return;
  232. }
  233. /* Ensure the packet is big enough to read req_id. Req_id is the 1st
  234. * field in any request/response message, so the payload should have at
  235. * least sizeof(u32) bytes
  236. */
  237. if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
  238. netdev_err(ndev, "rndis msg_len too small: %u\n",
  239. resp->msg_len);
  240. return;
  241. }
  242. spin_lock_irqsave(&dev->request_lock, flags);
  243. list_for_each_entry(request, &dev->req_list, list_ent) {
  244. /*
  245. * All request/response messages contain the RequestId as the 1st
  246. * field.
  247. */
  248. if (request->request_msg.msg.init_req.req_id
  249. == resp->msg.init_complete.req_id) {
  250. found = true;
  251. break;
  252. }
  253. }
  254. spin_unlock_irqrestore(&dev->request_lock, flags);
  255. if (found) {
  256. if (resp->msg_len <=
  257. sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
  258. memcpy(&request->response_msg, resp,
  259. resp->msg_len);
  260. if (request->request_msg.ndis_msg_type ==
  261. RNDIS_MSG_QUERY && request->request_msg.msg.
  262. query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
  263. rndis_set_link_state(dev, request);
  264. } else {
  265. netdev_err(ndev,
  266. "rndis response buffer overflow "
  267. "detected (size %u max %zu)\n",
  268. resp->msg_len,
  269. sizeof(struct rndis_message));
  270. if (resp->ndis_msg_type ==
  271. RNDIS_MSG_RESET_C) {
  272. /* does not have a request id field */
  273. request->response_msg.msg.reset_complete.
  274. status = RNDIS_STATUS_BUFFER_OVERFLOW;
  275. } else {
  276. request->response_msg.msg.
  277. init_complete.status =
  278. RNDIS_STATUS_BUFFER_OVERFLOW;
  279. }
  280. }
  281. complete(&request->wait_event);
  282. } else {
  283. netdev_err(ndev,
  284. "no rndis request found for this response "
  285. "(id 0x%x res type 0x%x)\n",
  286. resp->msg.init_complete.req_id,
  287. resp->ndis_msg_type);
  288. }
  289. }
  290. /*
  291. * Get the Per-Packet-Info with the specified type
  292. * return NULL if not found.
  293. */
  294. static inline void *rndis_get_ppi(struct net_device *ndev,
  295. struct rndis_packet *rpkt,
  296. u32 rpkt_len, u32 type, u8 internal)
  297. {
  298. struct rndis_per_packet_info *ppi;
  299. int len;
  300. if (rpkt->per_pkt_info_offset == 0)
  301. return NULL;
  302. /* Validate info_offset and info_len */
  303. if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
  304. rpkt->per_pkt_info_offset > rpkt_len) {
  305. netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
  306. rpkt->per_pkt_info_offset);
  307. return NULL;
  308. }
  309. if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
  310. netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
  311. rpkt->per_pkt_info_len);
  312. return NULL;
  313. }
  314. ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
  315. rpkt->per_pkt_info_offset);
  316. len = rpkt->per_pkt_info_len;
  317. while (len > 0) {
  318. /* Validate ppi_offset and ppi_size */
  319. if (ppi->size > len) {
  320. netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
  321. continue;
  322. }
  323. if (ppi->ppi_offset >= ppi->size) {
  324. netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
  325. continue;
  326. }
  327. if (ppi->type == type && ppi->internal == internal)
  328. return (void *)((ulong)ppi + ppi->ppi_offset);
  329. len -= ppi->size;
  330. ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
  331. }
  332. return NULL;
  333. }
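/* Append one receive segment coalescing (RSC) fragment to the per-channel
 * reassembly state. The first fragment also records the vlan, checksum and
 * hash metadata for the whole packet.
 */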
  334. static inline
  335. void rsc_add_data(struct netvsc_channel *nvchan,
  336. const struct ndis_pkt_8021q_info *vlan,
  337. const struct ndis_tcp_ip_checksum_info *csum_info,
  338. const u32 *hash_info,
  339. void *data, u32 len)
  340. {
  341. u32 cnt = nvchan->rsc.cnt;
  342. if (cnt) {
  343. nvchan->rsc.pktlen += len;
  344. } else {
  345. nvchan->rsc.vlan = vlan;
  346. nvchan->rsc.csum_info = csum_info;
  347. nvchan->rsc.pktlen = len;
  348. nvchan->rsc.hash_info = hash_info;
  349. }
  350. nvchan->rsc.data[cnt] = data;
  351. nvchan->rsc.len[cnt] = len;
  352. nvchan->rsc.cnt++;
  353. }
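/* Validate a received RNDIS data packet, look up its per-packet info
 * (vlan, checksum, hash and RSC pktinfo id), accumulate RSC fragments,
 * and hand completed packets to netvsc_recv_callback().
 */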
  354. static int rndis_filter_receive_data(struct net_device *ndev,
  355. struct netvsc_device *nvdev,
  356. struct netvsc_channel *nvchan,
  357. struct rndis_message *msg,
  358. u32 data_buflen)
  359. {
  360. struct rndis_packet *rndis_pkt = &msg->msg.pkt;
  361. const struct ndis_tcp_ip_checksum_info *csum_info;
  362. const struct ndis_pkt_8021q_info *vlan;
  363. const struct rndis_pktinfo_id *pktinfo_id;
  364. const u32 *hash_info;
  365. u32 data_offset, rpkt_len;
  366. void *data;
  367. bool rsc_more = false;
  368. int ret;
  369. /* Ensure data_buflen is big enough to read header fields */
  370. if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
  371. netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
  372. data_buflen);
  373. return NVSP_STAT_FAIL;
  374. }
  375. /* Validate rndis_pkt offset */
  376. if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
  377. netdev_err(ndev, "invalid rndis packet offset: %u\n",
  378. rndis_pkt->data_offset);
  379. return NVSP_STAT_FAIL;
  380. }
  381. /* Remove the rndis header and pass it back up the stack */
  382. data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
  383. rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
  384. data_buflen -= data_offset;
  385. /*
  386. * Make sure we got a valid RNDIS message: the remaining data_buflen
  387. * should be at least the data packet size (the rest is trailer padding).
  388. */
  389. if (unlikely(data_buflen < rndis_pkt->data_len)) {
  390. netdev_err(ndev, "rndis message buffer "
  391. "overflow detected (got %u, min %u)"
  392. "...dropping this message!\n",
  393. data_buflen, rndis_pkt->data_len);
  394. return NVSP_STAT_FAIL;
  395. }
  396. vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
  397. csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
  398. hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
  399. pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
  400. data = (void *)msg + data_offset;
  401. /* Identify RSC frags, drop erroneous packets */
  402. if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
  403. if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
  404. nvchan->rsc.cnt = 0;
  405. else if (nvchan->rsc.cnt == 0)
  406. goto drop;
  407. rsc_more = true;
  408. if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
  409. rsc_more = false;
  410. if (rsc_more && nvchan->rsc.is_last)
  411. goto drop;
  412. } else {
  413. nvchan->rsc.cnt = 0;
  414. }
  415. if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
  416. goto drop;
  417. /* Put data into per channel structure.
  418. * Also, remove the rndis trailer padding from the rndis packet message:
  419. * rndis_pkt->data_len tells us the real data length, so we only copy
  420. * the data packet to the stack, without the rndis trailer padding.
  421. */
  422. rsc_add_data(nvchan, vlan, csum_info, hash_info,
  423. data, rndis_pkt->data_len);
  424. if (rsc_more)
  425. return NVSP_STAT_SUCCESS;
  426. ret = netvsc_recv_callback(ndev, nvdev, nvchan);
  427. nvchan->rsc.cnt = 0;
  428. return ret;
  429. drop:
  430. return NVSP_STAT_FAIL;
  431. }
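/* Entry point for all RNDIS messages received from the host: data packets,
 * control completions and status indications.
 */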
  432. int rndis_filter_receive(struct net_device *ndev,
  433. struct netvsc_device *net_dev,
  434. struct netvsc_channel *nvchan,
  435. void *data, u32 buflen)
  436. {
  437. struct net_device_context *net_device_ctx = netdev_priv(ndev);
  438. struct rndis_message *rndis_msg = data;
  439. if (netif_msg_rx_status(net_device_ctx))
  440. dump_rndis_message(ndev, rndis_msg);
  441. /* Validate incoming rndis_message packet */
  442. if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
  443. buflen < rndis_msg->msg_len) {
  444. netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
  445. buflen, rndis_msg->msg_len);
  446. return NVSP_STAT_FAIL;
  447. }
  448. switch (rndis_msg->ndis_msg_type) {
  449. case RNDIS_MSG_PACKET:
  450. return rndis_filter_receive_data(ndev, net_dev, nvchan,
  451. rndis_msg, buflen);
  452. case RNDIS_MSG_INIT_C:
  453. case RNDIS_MSG_QUERY_C:
  454. case RNDIS_MSG_SET_C:
  455. /* completion msgs */
  456. rndis_filter_receive_response(ndev, net_dev, rndis_msg);
  457. break;
  458. case RNDIS_MSG_INDICATE:
  459. /* notification msgs */
  460. netvsc_linkstatus_callback(ndev, rndis_msg);
  461. break;
  462. default:
  463. netdev_err(ndev,
  464. "unhandled rndis message (type %u len %u)\n",
  465. rndis_msg->ndis_msg_type,
  466. rndis_msg->msg_len);
  467. return NVSP_STAT_FAIL;
  468. }
  469. return NVSP_STAT_SUCCESS;
  470. }
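/* Issue an RNDIS OID query to the host and copy the reply into the caller's
 * buffer; *result_size is updated to the length actually returned.
 */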
  471. static int rndis_filter_query_device(struct rndis_device *dev,
  472. struct netvsc_device *nvdev,
  473. u32 oid, void *result, u32 *result_size)
  474. {
  475. struct rndis_request *request;
  476. u32 inresult_size = *result_size;
  477. struct rndis_query_request *query;
  478. struct rndis_query_complete *query_complete;
  479. int ret = 0;
  480. if (!result)
  481. return -EINVAL;
  482. *result_size = 0;
  483. request = get_rndis_request(dev, RNDIS_MSG_QUERY,
  484. RNDIS_MESSAGE_SIZE(struct rndis_query_request));
  485. if (!request) {
  486. ret = -ENOMEM;
  487. goto cleanup;
  488. }
  489. /* Setup the rndis query */
  490. query = &request->request_msg.msg.query_req;
  491. query->oid = oid;
  492. query->info_buf_offset = sizeof(struct rndis_query_request);
  493. query->info_buflen = 0;
  494. query->dev_vc_handle = 0;
  495. if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
  496. struct ndis_offload *hwcaps;
  497. u32 nvsp_version = nvdev->nvsp_version;
  498. u8 ndis_rev;
  499. size_t size;
  500. if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
  501. ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
  502. size = NDIS_OFFLOAD_SIZE;
  503. } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
  504. ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
  505. size = NDIS_OFFLOAD_SIZE_6_1;
  506. } else {
  507. ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
  508. size = NDIS_OFFLOAD_SIZE_6_0;
  509. }
  510. request->request_msg.msg_len += size;
  511. query->info_buflen = size;
  512. hwcaps = (struct ndis_offload *)
  513. ((unsigned long)query + query->info_buf_offset);
  514. hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
  515. hwcaps->header.revision = ndis_rev;
  516. hwcaps->header.size = size;
  517. } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
  518. struct ndis_recv_scale_cap *cap;
  519. request->request_msg.msg_len +=
  520. sizeof(struct ndis_recv_scale_cap);
  521. query->info_buflen = sizeof(struct ndis_recv_scale_cap);
  522. cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
  523. query->info_buf_offset);
  524. cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
  525. cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
  526. cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
  527. }
  528. ret = rndis_filter_send_request(dev, request);
  529. if (ret != 0)
  530. goto cleanup;
  531. wait_for_completion(&request->wait_event);
  532. /* Copy the response back */
  533. query_complete = &request->response_msg.msg.query_complete;
  534. if (query_complete->info_buflen > inresult_size) {
  535. ret = -1;
  536. goto cleanup;
  537. }
  538. memcpy(result,
  539. (void *)((unsigned long)query_complete +
  540. query_complete->info_buf_offset),
  541. query_complete->info_buflen);
  542. *result_size = query_complete->info_buflen;
  543. cleanup:
  544. if (request)
  545. put_rndis_request(dev, request);
  546. return ret;
  547. }
  548. /* Get the hardware offload capabilities */
  549. static int
  550. rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
  551. struct ndis_offload *caps)
  552. {
  553. u32 caps_len = sizeof(*caps);
  554. int ret;
  555. memset(caps, 0, sizeof(*caps));
  556. ret = rndis_filter_query_device(dev, net_device,
  557. OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
  558. caps, &caps_len);
  559. if (ret)
  560. return ret;
  561. if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
  562. netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
  563. caps->header.type);
  564. return -EINVAL;
  565. }
  566. if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
  567. netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
  568. caps->header.revision);
  569. return -EINVAL;
  570. }
  571. if (caps->header.size > caps_len ||
  572. caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
  573. netdev_warn(dev->ndev,
  574. "invalid NDIS objsize %u, data size %u\n",
  575. caps->header.size, caps_len);
  576. return -EINVAL;
  577. }
  578. return 0;
  579. }
  580. static int rndis_filter_query_device_mac(struct rndis_device *dev,
  581. struct netvsc_device *net_device)
  582. {
  583. u32 size = ETH_ALEN;
  584. return rndis_filter_query_device(dev, net_device,
  585. RNDIS_OID_802_3_PERMANENT_ADDRESS,
  586. dev->hw_mac_adr, &size);
  587. }
  588. #define NWADR_STR "NetworkAddress"
  589. #define NWADR_STRLEN 14
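/* Set the adapter MAC address by sending the "NetworkAddress" configuration
 * parameter (as a UTF-16 string) with RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER.
 */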
  590. int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
  591. const char *mac)
  592. {
  593. struct rndis_device *rdev = nvdev->extension;
  594. struct rndis_request *request;
  595. struct rndis_set_request *set;
  596. struct rndis_config_parameter_info *cpi;
  597. wchar_t *cfg_nwadr, *cfg_mac;
  598. struct rndis_set_complete *set_complete;
  599. char macstr[2*ETH_ALEN+1];
  600. u32 extlen = sizeof(struct rndis_config_parameter_info) +
  601. 2*NWADR_STRLEN + 4*ETH_ALEN;
  602. int ret;
  603. request = get_rndis_request(rdev, RNDIS_MSG_SET,
  604. RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
  605. if (!request)
  606. return -ENOMEM;
  607. set = &request->request_msg.msg.set_req;
  608. set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
  609. set->info_buflen = extlen;
  610. set->info_buf_offset = sizeof(struct rndis_set_request);
  611. set->dev_vc_handle = 0;
  612. cpi = (struct rndis_config_parameter_info *)((ulong)set +
  613. set->info_buf_offset);
  614. cpi->parameter_name_offset =
  615. sizeof(struct rndis_config_parameter_info);
  616. /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
  617. cpi->parameter_name_length = 2*NWADR_STRLEN;
  618. cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
  619. cpi->parameter_value_offset =
  620. cpi->parameter_name_offset + cpi->parameter_name_length;
  621. /* Multiply by 4 because each MAC byte is displayed as 2 utf16 chars */
  622. cpi->parameter_value_length = 4*ETH_ALEN;
  623. cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
  624. cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
  625. ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
  626. cfg_nwadr, NWADR_STRLEN);
  627. if (ret < 0)
  628. goto cleanup;
  629. snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
  630. ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
  631. cfg_mac, 2*ETH_ALEN);
  632. if (ret < 0)
  633. goto cleanup;
  634. ret = rndis_filter_send_request(rdev, request);
  635. if (ret != 0)
  636. goto cleanup;
  637. wait_for_completion(&request->wait_event);
  638. set_complete = &request->response_msg.msg.set_complete;
  639. if (set_complete->status != RNDIS_STATUS_SUCCESS)
  640. ret = -EIO;
  641. cleanup:
  642. put_rndis_request(rdev, request);
  643. return ret;
  644. }
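/* Push the requested NDIS offload parameters to the host via
 * OID_TCP_OFFLOAD_PARAMETERS, trimming UDP checksum offload on NVSP v4
 * and older hosts.
 */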
  645. int
  646. rndis_filter_set_offload_params(struct net_device *ndev,
  647. struct netvsc_device *nvdev,
  648. struct ndis_offload_params *req_offloads)
  649. {
  650. struct rndis_device *rdev = nvdev->extension;
  651. struct rndis_request *request;
  652. struct rndis_set_request *set;
  653. struct ndis_offload_params *offload_params;
  654. struct rndis_set_complete *set_complete;
  655. u32 extlen = sizeof(struct ndis_offload_params);
  656. int ret;
  657. u32 vsp_version = nvdev->nvsp_version;
  658. if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
  659. extlen = VERSION_4_OFFLOAD_SIZE;
  660. /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
  661. * UDP checksum offload.
  662. */
  663. req_offloads->udp_ip_v4_csum = 0;
  664. req_offloads->udp_ip_v6_csum = 0;
  665. }
  666. request = get_rndis_request(rdev, RNDIS_MSG_SET,
  667. RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
  668. if (!request)
  669. return -ENOMEM;
  670. set = &request->request_msg.msg.set_req;
  671. set->oid = OID_TCP_OFFLOAD_PARAMETERS;
  672. set->info_buflen = extlen;
  673. set->info_buf_offset = sizeof(struct rndis_set_request);
  674. set->dev_vc_handle = 0;
  675. offload_params = (struct ndis_offload_params *)((ulong)set +
  676. set->info_buf_offset);
  677. *offload_params = *req_offloads;
  678. offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
  679. offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
  680. offload_params->header.size = extlen;
  681. ret = rndis_filter_send_request(rdev, request);
  682. if (ret != 0)
  683. goto cleanup;
  684. wait_for_completion(&request->wait_event);
  685. set_complete = &request->response_msg.msg.set_complete;
  686. if (set_complete->status != RNDIS_STATUS_SUCCESS) {
  687. netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
  688. set_complete->status);
  689. ret = -EINVAL;
  690. }
  691. cleanup:
  692. put_rndis_request(rdev, request);
  693. return ret;
  694. }
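/* Send the RSS parameters (hash function, indirection table and hash key)
 * to the host via OID_GEN_RECEIVE_SCALE_PARAMETERS.
 */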
  695. static int rndis_set_rss_param_msg(struct rndis_device *rdev,
  696. const u8 *rss_key, u16 flag)
  697. {
  698. struct net_device *ndev = rdev->ndev;
  699. struct net_device_context *ndc = netdev_priv(ndev);
  700. struct rndis_request *request;
  701. struct rndis_set_request *set;
  702. struct rndis_set_complete *set_complete;
  703. u32 extlen = sizeof(struct ndis_recv_scale_param) +
  704. 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
  705. struct ndis_recv_scale_param *rssp;
  706. u32 *itab;
  707. u8 *keyp;
  708. int i, ret;
  709. request = get_rndis_request(
  710. rdev, RNDIS_MSG_SET,
  711. RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
  712. if (!request)
  713. return -ENOMEM;
  714. set = &request->request_msg.msg.set_req;
  715. set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
  716. set->info_buflen = extlen;
  717. set->info_buf_offset = sizeof(struct rndis_set_request);
  718. set->dev_vc_handle = 0;
  719. rssp = (struct ndis_recv_scale_param *)(set + 1);
  720. rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
  721. rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
  722. rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
  723. rssp->flag = flag;
  724. rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
  725. NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
  726. NDIS_HASH_TCP_IPV6;
  727. rssp->indirect_tabsize = 4*ITAB_NUM;
  728. rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
  729. rssp->hashkey_size = NETVSC_HASH_KEYLEN;
  730. rssp->hashkey_offset = rssp->indirect_taboffset +
  731. rssp->indirect_tabsize;
  732. /* Set indirection table entries */
  733. itab = (u32 *)(rssp + 1);
  734. for (i = 0; i < ITAB_NUM; i++)
  735. itab[i] = ndc->rx_table[i];
  736. /* Set hash key values */
  737. keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
  738. memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
  739. ret = rndis_filter_send_request(rdev, request);
  740. if (ret != 0)
  741. goto cleanup;
  742. wait_for_completion(&request->wait_event);
  743. set_complete = &request->response_msg.msg.set_complete;
  744. if (set_complete->status == RNDIS_STATUS_SUCCESS) {
  745. if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
  746. !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
  747. memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
  748. } else {
  749. netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
  750. set_complete->status);
  751. ret = -EINVAL;
  752. }
  753. cleanup:
  754. put_rndis_request(rdev, request);
  755. return ret;
  756. }
  757. int rndis_filter_set_rss_param(struct rndis_device *rdev,
  758. const u8 *rss_key)
  759. {
  760. /* Disable RSS before change */
  761. rndis_set_rss_param_msg(rdev, rss_key,
  762. NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
  763. return rndis_set_rss_param_msg(rdev, rss_key, 0);
  764. }
  765. static int rndis_filter_query_device_link_status(struct rndis_device *dev,
  766. struct netvsc_device *net_device)
  767. {
  768. u32 size = sizeof(u32);
  769. u32 link_status;
  770. return rndis_filter_query_device(dev, net_device,
  771. RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
  772. &link_status, &size);
  773. }
  774. static int rndis_filter_query_link_speed(struct rndis_device *dev,
  775. struct netvsc_device *net_device)
  776. {
  777. u32 size = sizeof(u32);
  778. u32 link_speed;
  779. struct net_device_context *ndc;
  780. int ret;
  781. ret = rndis_filter_query_device(dev, net_device,
  782. RNDIS_OID_GEN_LINK_SPEED,
  783. &link_speed, &size);
  784. if (!ret) {
  785. ndc = netdev_priv(dev->ndev);
  786. /* The link speed reported by the host is in units of 100 bps, so
  787. * we convert it to Mbps here.
  788. */
  789. ndc->speed = link_speed / 10000;
  790. }
  791. return ret;
  792. }
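/* Program the host-side receive packet filter; a redundant set of the same
 * filter value is skipped.
 */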
  793. static int rndis_filter_set_packet_filter(struct rndis_device *dev,
  794. u32 new_filter)
  795. {
  796. struct rndis_request *request;
  797. struct rndis_set_request *set;
  798. int ret;
  799. if (dev->filter == new_filter)
  800. return 0;
  801. request = get_rndis_request(dev, RNDIS_MSG_SET,
  802. RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
  803. sizeof(u32));
  804. if (!request)
  805. return -ENOMEM;
  806. /* Setup the rndis set */
  807. set = &request->request_msg.msg.set_req;
  808. set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
  809. set->info_buflen = sizeof(u32);
  810. set->info_buf_offset = sizeof(struct rndis_set_request);
  811. memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
  812. &new_filter, sizeof(u32));
  813. ret = rndis_filter_send_request(dev, request);
  814. if (ret == 0) {
  815. wait_for_completion(&request->wait_event);
  816. dev->filter = new_filter;
  817. }
  818. put_rndis_request(dev, request);
  819. return ret;
  820. }
  821. static void rndis_set_multicast(struct work_struct *w)
  822. {
  823. struct rndis_device *rdev
  824. = container_of(w, struct rndis_device, mcast_work);
  825. u32 filter = NDIS_PACKET_TYPE_DIRECTED;
  826. unsigned int flags = rdev->ndev->flags;
  827. if (flags & IFF_PROMISC) {
  828. filter = NDIS_PACKET_TYPE_PROMISCUOUS;
  829. } else {
  830. if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
  831. filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
  832. if (flags & IFF_BROADCAST)
  833. filter |= NDIS_PACKET_TYPE_BROADCAST;
  834. }
  835. rndis_filter_set_packet_filter(rdev, filter);
  836. }
  837. void rndis_filter_update(struct netvsc_device *nvdev)
  838. {
  839. struct rndis_device *rdev = nvdev->extension;
  840. schedule_work(&rdev->mcast_work);
  841. }
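/* Perform the RNDIS INITIALIZE exchange with the host and record the
 * negotiated max packets per message and packet alignment on the
 * netvsc device.
 */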
  842. static int rndis_filter_init_device(struct rndis_device *dev,
  843. struct netvsc_device *nvdev)
  844. {
  845. struct rndis_request *request;
  846. struct rndis_initialize_request *init;
  847. struct rndis_initialize_complete *init_complete;
  848. u32 status;
  849. int ret;
  850. request = get_rndis_request(dev, RNDIS_MSG_INIT,
  851. RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
  852. if (!request) {
  853. ret = -ENOMEM;
  854. goto cleanup;
  855. }
  856. /* Setup the rndis initialize request */
  857. init = &request->request_msg.msg.init_req;
  858. init->major_ver = RNDIS_MAJOR_VERSION;
  859. init->minor_ver = RNDIS_MINOR_VERSION;
  860. init->max_xfer_size = 0x4000;
  861. dev->state = RNDIS_DEV_INITIALIZING;
  862. ret = rndis_filter_send_request(dev, request);
  863. if (ret != 0) {
  864. dev->state = RNDIS_DEV_UNINITIALIZED;
  865. goto cleanup;
  866. }
  867. wait_for_completion(&request->wait_event);
  868. init_complete = &request->response_msg.msg.init_complete;
  869. status = init_complete->status;
  870. if (status == RNDIS_STATUS_SUCCESS) {
  871. dev->state = RNDIS_DEV_INITIALIZED;
  872. nvdev->max_pkt = init_complete->max_pkt_per_msg;
  873. nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
  874. ret = 0;
  875. } else {
  876. dev->state = RNDIS_DEV_UNINITIALIZED;
  877. ret = -EINVAL;
  878. }
  879. cleanup:
  880. if (request)
  881. put_rndis_request(dev, request);
  882. return ret;
  883. }
  884. static bool netvsc_device_idle(const struct netvsc_device *nvdev)
  885. {
  886. int i;
  887. for (i = 0; i < nvdev->num_chn; i++) {
  888. const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
  889. if (nvchan->mrc.first != nvchan->mrc.next)
  890. return false;
  891. if (atomic_read(&nvchan->queue_sends) > 0)
  892. return false;
  893. }
  894. return true;
  895. }
  896. static void rndis_filter_halt_device(struct netvsc_device *nvdev,
  897. struct rndis_device *dev)
  898. {
  899. struct rndis_request *request;
  900. struct rndis_halt_request *halt;
  901. /* Attempt to do a rndis device halt */
  902. request = get_rndis_request(dev, RNDIS_MSG_HALT,
  903. RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
  904. if (!request)
  905. goto cleanup;
  906. /* Setup the rndis halt request */
  907. halt = &request->request_msg.msg.halt_req;
  908. halt->req_id = atomic_inc_return(&dev->new_req_id);
  909. /* Ignore return since this msg is optional. */
  910. rndis_filter_send_request(dev, request);
  911. dev->state = RNDIS_DEV_UNINITIALIZED;
  912. cleanup:
  913. nvdev->destroy = true;
  914. /* Force flag to be ordered before waiting */
  915. wmb();
  916. /* Wait for all send completions */
  917. wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
  918. if (request)
  919. put_rndis_request(dev, request);
  920. }
  921. static int rndis_filter_open_device(struct rndis_device *dev)
  922. {
  923. int ret;
  924. if (dev->state != RNDIS_DEV_INITIALIZED)
  925. return 0;
  926. ret = rndis_filter_set_packet_filter(dev,
  927. NDIS_PACKET_TYPE_BROADCAST |
  928. NDIS_PACKET_TYPE_ALL_MULTICAST |
  929. NDIS_PACKET_TYPE_DIRECTED);
  930. if (ret == 0)
  931. dev->state = RNDIS_DEV_DATAINITIALIZED;
  932. return ret;
  933. }
  934. static int rndis_filter_close_device(struct rndis_device *dev)
  935. {
  936. int ret;
  937. if (dev->state != RNDIS_DEV_DATAINITIALIZED)
  938. return 0;
  939. /* Make sure rndis_set_multicast doesn't re-enable filter! */
  940. cancel_work_sync(&dev->mcast_work);
  941. ret = rndis_filter_set_packet_filter(dev, 0);
  942. if (ret == -ENODEV)
  943. ret = 0;
  944. if (ret == 0)
  945. dev->state = RNDIS_DEV_INITIALIZED;
  946. return ret;
  947. }
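/* Callback invoked when the host offers a sub-channel: open the vmbus
 * channel, enable NAPI on it, and wake up rndis_set_subchannel() once all
 * expected sub-channels are open.
 */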
  948. static void netvsc_sc_open(struct vmbus_channel *new_sc)
  949. {
  950. struct net_device *ndev =
  951. hv_get_drvdata(new_sc->primary_channel->device_obj);
  952. struct net_device_context *ndev_ctx = netdev_priv(ndev);
  953. struct netvsc_device *nvscdev;
  954. u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
  955. struct netvsc_channel *nvchan;
  956. int ret;
  957. /* This is safe because this callback only happens when
  958. * a new device is being set up and waiting on the channel_init_wait.
  959. */
  960. nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
  961. if (!nvscdev || chn_index >= nvscdev->num_chn)
  962. return;
  963. nvchan = nvscdev->chan_table + chn_index;
  964. /* Because the device uses NAPI, all the interrupt batching and
  965. * control is done via the NET softirq, not the channel handling.
  966. */
  967. set_channel_read_mode(new_sc, HV_CALL_ISR);
  968. /* Set the channel before opening. */
  969. nvchan->channel = new_sc;
  970. ret = vmbus_open(new_sc, netvsc_ring_bytes,
  971. netvsc_ring_bytes, NULL, 0,
  972. netvsc_channel_cb, nvchan);
  973. if (ret == 0)
  974. napi_enable(&nvchan->napi);
  975. else
  976. netdev_notice(ndev, "sub channel open failed: %d\n", ret);
  977. if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
  978. wake_up(&nvscdev->subchan_open);
  979. }
  980. /* Open sub-channels after completing the handling of the device probe.
  981. * This avoids overlapping the processing of the host message for the
  982. * new primary channel with the initialization of the sub-channels.
  983. */
  984. int rndis_set_subchannel(struct net_device *ndev,
  985. struct netvsc_device *nvdev,
  986. struct netvsc_device_info *dev_info)
  987. {
  988. struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
  989. struct net_device_context *ndev_ctx = netdev_priv(ndev);
  990. struct hv_device *hv_dev = ndev_ctx->device_ctx;
  991. struct rndis_device *rdev = nvdev->extension;
  992. int i, ret;
  993. ASSERT_RTNL();
  994. memset(init_packet, 0, sizeof(struct nvsp_message));
  995. init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
  996. init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
  997. init_packet->msg.v5_msg.subchn_req.num_subchannels =
  998. nvdev->num_chn - 1;
  999. trace_nvsp_send(ndev, init_packet);
  1000. ret = vmbus_sendpacket(hv_dev->channel, init_packet,
  1001. sizeof(struct nvsp_message),
  1002. (unsigned long)init_packet,
  1003. VM_PKT_DATA_INBAND,
  1004. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
  1005. if (ret) {
  1006. netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
  1007. return ret;
  1008. }
  1009. wait_for_completion(&nvdev->channel_init_wait);
  1010. if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
  1011. netdev_err(ndev, "sub channel request failed\n");
  1012. return -EIO;
  1013. }
  1014. nvdev->num_chn = 1 +
  1015. init_packet->msg.v5_msg.subchn_comp.num_subchannels;
  1016. /* wait for all sub channels to open */
  1017. wait_event(nvdev->subchan_open,
  1018. atomic_read(&nvdev->open_chn) == nvdev->num_chn);
  1019. for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
  1020. ndev_ctx->tx_table[i] = i % nvdev->num_chn;
  1021. /* ignore failures from setting rss parameters, still have channels */
  1022. if (dev_info)
  1023. rndis_filter_set_rss_param(rdev, dev_info->rss_key);
  1024. else
  1025. rndis_filter_set_rss_param(rdev, netvsc_hash_key);
  1026. netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
  1027. netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
  1028. return 0;
  1029. }
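/* Query the host's hardware offload capabilities and translate them into
 * netdev feature flags and the NDIS offload parameters sent back to
 * the host.
 */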
  1030. static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
  1031. struct netvsc_device *nvdev)
  1032. {
  1033. struct net_device *net = rndis_device->ndev;
  1034. struct net_device_context *net_device_ctx = netdev_priv(net);
  1035. struct ndis_offload hwcaps;
  1036. struct ndis_offload_params offloads;
  1037. unsigned int gso_max_size = GSO_MAX_SIZE;
  1038. int ret;
  1039. /* Find HW offload capabilities */
  1040. ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
  1041. if (ret != 0)
  1042. return ret;
  1043. /* A value of zero means "no change"; now turn on what we want. */
  1044. memset(&offloads, 0, sizeof(struct ndis_offload_params));
  1045. /* Linux does not care about IP checksum offload; it is always done in the kernel */
  1046. offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
  1047. /* Reset previously set hw_features flags */
  1048. net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
  1049. net_device_ctx->tx_checksum_mask = 0;
  1050. /* Compute tx offload settings based on hw capabilities */
  1051. net->hw_features |= NETIF_F_RXCSUM;
  1052. net->hw_features |= NETIF_F_SG;
  1053. net->hw_features |= NETIF_F_RXHASH;
  1054. if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
  1055. /* Can checksum TCP */
  1056. net->hw_features |= NETIF_F_IP_CSUM;
  1057. net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
  1058. offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
  1059. if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
  1060. offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
  1061. net->hw_features |= NETIF_F_TSO;
  1062. if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
  1063. gso_max_size = hwcaps.lsov2.ip4_maxsz;
  1064. }
  1065. if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
  1066. offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
  1067. net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
  1068. }
  1069. }
  1070. if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
  1071. net->hw_features |= NETIF_F_IPV6_CSUM;
  1072. offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
  1073. net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
  1074. if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
  1075. (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
  1076. offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
  1077. net->hw_features |= NETIF_F_TSO6;
  1078. if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
  1079. gso_max_size = hwcaps.lsov2.ip6_maxsz;
  1080. }
  1081. if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
  1082. offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
  1083. net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
  1084. }
  1085. }
  1086. if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
  1087. net->hw_features |= NETIF_F_LRO;
  1088. if (net->features & NETIF_F_LRO) {
  1089. offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
  1090. offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
  1091. } else {
  1092. offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
  1093. offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
  1094. }
  1095. }
  1096. /* In case some hw_features disappeared we need to remove them from
  1097. * net->features list as they're no longer supported.
  1098. */
  1099. net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
  1100. netif_set_gso_max_size(net, gso_max_size);
  1101. ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
  1102. return ret;
  1103. }
  1104. static void rndis_get_friendly_name(struct net_device *net,
  1105. struct rndis_device *rndis_device,
  1106. struct netvsc_device *net_device)
  1107. {
  1108. ucs2_char_t wname[256];
  1109. unsigned long len;
  1110. u8 ifalias[256];
  1111. u32 size;
  1112. size = sizeof(wname);
  1113. if (rndis_filter_query_device(rndis_device, net_device,
  1114. RNDIS_OID_GEN_FRIENDLY_NAME,
  1115. wname, &size) != 0)
  1116. return; /* ignore if host does not support */
  1117. if (size == 0)
  1118. return; /* name not set */
  1119. /* Convert Windows Unicode string to UTF-8 */
  1120. len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
  1121. /* ignore the default value from host */
  1122. if (strcmp(ifalias, "Network Adapter") != 0)
  1123. dev_set_alias(net, ifalias, len);
  1124. }
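/* Create the netvsc device, initialize RNDIS, query the MAC address, MTU,
 * link state and offload capabilities, and set up vRSS sub-channels when
 * the host supports them.
 */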
  1125. struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
  1126. struct netvsc_device_info *device_info)
  1127. {
  1128. struct net_device *net = hv_get_drvdata(dev);
  1129. struct net_device_context *ndc = netdev_priv(net);
  1130. struct netvsc_device *net_device;
  1131. struct rndis_device *rndis_device;
  1132. struct ndis_recv_scale_cap rsscap;
  1133. u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
  1134. u32 mtu, size;
  1135. u32 num_possible_rss_qs;
  1136. int i, ret;
  1137. rndis_device = get_rndis_device();
  1138. if (!rndis_device)
  1139. return ERR_PTR(-ENODEV);
  1140. /* Let the inner driver handle this first to create the netvsc channel.
  1141. * NOTE! Once the channel is created, we may get a receive callback
  1142. * (RndisFilterOnReceive()) before this call is completed.
  1143. */
  1144. net_device = netvsc_device_add(dev, device_info);
  1145. if (IS_ERR(net_device)) {
  1146. kfree(rndis_device);
  1147. return net_device;
  1148. }
  1149. /* Initialize the rndis device */
  1150. net_device->max_chn = 1;
  1151. net_device->num_chn = 1;
  1152. net_device->extension = rndis_device;
  1153. rndis_device->ndev = net;
  1154. /* Send the rndis initialization message */
  1155. ret = rndis_filter_init_device(rndis_device, net_device);
  1156. if (ret != 0)
  1157. goto err_dev_remv;
  1158. /* Get the MTU from the host */
  1159. size = sizeof(u32);
  1160. ret = rndis_filter_query_device(rndis_device, net_device,
  1161. RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
  1162. &mtu, &size);
  1163. if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
  1164. net->mtu = mtu;
  1165. /* Get the mac address */
  1166. ret = rndis_filter_query_device_mac(rndis_device, net_device);
  1167. if (ret != 0)
  1168. goto err_dev_remv;
  1169. memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
  1170. /* Get friendly name as ifalias */
  1171. if (!net->ifalias)
  1172. rndis_get_friendly_name(net, rndis_device, net_device);
  1173. /* Query and set hardware capabilities */
  1174. ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
  1175. if (ret != 0)
  1176. goto err_dev_remv;
  1177. rndis_filter_query_device_link_status(rndis_device, net_device);
  1178. netdev_dbg(net, "Device MAC %pM link state %s\n",
  1179. rndis_device->hw_mac_adr,
  1180. rndis_device->link_state ? "down" : "up");
  1181. if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
  1182. goto out;
  1183. rndis_filter_query_link_speed(rndis_device, net_device);
  1184. /* vRSS setup */
  1185. memset(&rsscap, 0, rsscap_size);
  1186. ret = rndis_filter_query_device(rndis_device, net_device,
  1187. OID_GEN_RECEIVE_SCALE_CAPABILITIES,
  1188. &rsscap, &rsscap_size);
  1189. if (ret || rsscap.num_recv_que < 2)
  1190. goto out;
  1191. /* This guarantees that num_possible_rss_qs <= num_online_cpus */
  1192. num_possible_rss_qs = min_t(u32, num_online_cpus(),
  1193. rsscap.num_recv_que);
  1194. net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
  1195. /* We will use the given number of channels if available. */
  1196. net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
  1197. if (!netif_is_rxfh_configured(net)) {
  1198. for (i = 0; i < ITAB_NUM; i++)
  1199. ndc->rx_table[i] = ethtool_rxfh_indir_default(
  1200. i, net_device->num_chn);
  1201. }
  1202. atomic_set(&net_device->open_chn, 1);
  1203. vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
  1204. for (i = 1; i < net_device->num_chn; i++) {
  1205. ret = netvsc_alloc_recv_comp_ring(net_device, i);
  1206. if (ret) {
  1207. while (--i != 0)
  1208. vfree(net_device->chan_table[i].mrc.slots);
  1209. goto out;
  1210. }
  1211. }
  1212. for (i = 1; i < net_device->num_chn; i++)
  1213. netif_napi_add(net, &net_device->chan_table[i].napi,
  1214. netvsc_poll, NAPI_POLL_WEIGHT);
  1215. return net_device;
  1216. out:
  1217. /* setting up multiple channels failed */
  1218. net_device->max_chn = 1;
  1219. net_device->num_chn = 1;
  1220. return net_device;
  1221. err_dev_remv:
  1222. rndis_filter_device_remove(dev, net_device);
  1223. return ERR_PTR(ret);
  1224. }
  1225. void rndis_filter_device_remove(struct hv_device *dev,
  1226. struct netvsc_device *net_dev)
  1227. {
  1228. struct rndis_device *rndis_dev = net_dev->extension;
  1229. /* Halt and release the rndis device */
  1230. rndis_filter_halt_device(net_dev, rndis_dev);
  1231. netvsc_device_remove(dev);
  1232. }
  1233. int rndis_filter_open(struct netvsc_device *nvdev)
  1234. {
  1235. if (!nvdev)
  1236. return -EINVAL;
  1237. return rndis_filter_open_device(nvdev->extension);
  1238. }
  1239. int rndis_filter_close(struct netvsc_device *nvdev)
  1240. {
  1241. if (!nvdev)
  1242. return -EINVAL;
  1243. return rndis_filter_close_device(nvdev->extension);
  1244. }