// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 */

#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include "mconsole_kern.h"
#include <net_kern.h>
#include <net_user.h>

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

/*
 * The drop_skb is used when we can't allocate an skb.  The
 * packet is read into drop_skb in order to get the data off the
 * connection to the host.
 * It is reallocated whenever a maximum packet size is seen which is
 * larger than any seen before.  update_drop_skb is called from
 * eth_configure when a new interface is added.
 */
static DEFINE_SPINLOCK(drop_lock);
static struct sk_buff *drop_skb;
static int drop_max;

static int update_drop_skb(int max)
{
	struct sk_buff *new;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&drop_lock, flags);

	if (max <= drop_max)
		goto out;

	err = -ENOMEM;
	new = dev_alloc_skb(max);
	if (new == NULL)
		goto out;

	skb_put(new, max);

	kfree_skb(drop_skb);
	drop_skb = new;
	drop_max = max;

	err = 0;
out:
	spin_unlock_irqrestore(&drop_lock, flags);

	return err;
}

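/*
 * Receive path: pull one packet from the host file descriptor into a
 * freshly allocated skb and hand it to the stack with netif_rx().
 * Returns the packet length, 0 for a dropped packet, or a negative
 * error from the transport's read routine.
 */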
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(lp->max_packet);
	if (skb == NULL) {
		drop_skb->dev = dev;
		/* Read a packet into drop_skb and don't do anything with it. */
		(*lp->read)(lp->fd, drop_skb, lp);
		dev->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, lp->max_packet);
	skb_reset_mac_header(skb);
	pkt_len = (*lp->read)(lp->fd, skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;
		netif_rx(skb);
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

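/*
 * Deferred close: dev_close() can't be called from the read interrupt
 * handler, so the handler schedules this work item instead.
 */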
static void uml_dev_close(struct work_struct *work)
{
	struct uml_net_private *lp =
		container_of(work, struct uml_net_private, work);
	dev_close(lp->dev);
}

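/*
 * Read-IRQ handler: drain every packet the host has queued on the fd.
 * A negative return from uml_net_rx() means the channel died, and the
 * device shutdown is deferred to process context (see uml_dev_close).
 */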
static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return IRQ_NONE;

	spin_lock(&lp->lock);
	while ((err = uml_net_rx(dev)) > 0) ;
	if (err < 0) {
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		/* dev_close can't be called in interrupt context, and takes
		 * again lp->lock.
		 * And dev_close() can be safely called multiple times on the
		 * same device, since it tests for (dev->flags & IFF_UP). So
		 * there's no harm in delaying the device shutdown.
		 * Furthermore, the workqueue will not re-enqueue an already
		 * enqueued work item. */
		schedule_work(&lp->work);
		goto out;
	}
out:
	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

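/*
 * ndo_open: open the transport's host-side channel, attach its file
 * descriptor to a read IRQ, and drain anything the host queued before
 * the interrupt was wired up.
 */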
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (lp->fd >= 0) {
		err = -ENXIO;
		goto out;
	}

	lp->fd = (*lp->open)(&lp->user);
	if (lp->fd < 0) {
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     IRQF_SHARED, dev->name, dev);
	if (err != 0) {
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}

	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while ((err = uml_net_rx(dev)) > 0) ;

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return 0;
out_close:
	if (lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
out:
	return err;
}

static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	um_free_irq(dev->irq, dev);
	if (lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;

	spin_lock(&opened_lock);
	list_del(&lp->list);
	spin_unlock(&opened_lock);

	return 0;
}

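/*
 * ndo_start_xmit: write the frame synchronously to the host fd.  There
 * is no TX-completion interrupt, so the queue is restarted immediately
 * and the skb is consumed before returning.
 */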
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, skb, lp);
	skb_tx_timestamp(skb);

	if (len == skb->len) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		netif_trans_update(dev);
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	}
	else if (len == 0) {
		netif_start_queue(dev);
		dev->stats.tx_dropped++;
	}
	else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
	return;
}

static void uml_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netif_trans_update(dev);
	netif_wake_queue(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void uml_net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	uml_net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}

static const struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo	= uml_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};

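/*
 * Parse a MAC address given on the command line as six hex bytes
 * separated by ':' or ',' (e.g. "fe:fd:00:00:00:01" - an illustrative
 * value); fall back to a random, locally administered address when the
 * string is absent or fails any validity check.
 */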
void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
	unsigned char *addr = dev->dev_addr;
	char *end;
	int i;

	if (str == NULL)
		goto random;

	for (i = 0; i < 6; i++) {
		addr[i] = simple_strtoul(str, &end, 16);
		if ((end == str) ||
		    ((*end != ':') && (*end != ',') && (*end != '\0'))) {
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			goto random;
		}
		str = end + 1;
	}
	if (is_multicast_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign a multicast ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_valid_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign an invalid ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_local_ether_addr(addr)) {
		printk(KERN_WARNING
		       "Warning: Assigning a globally valid ethernet "
		       "address to a device\n");
		printk(KERN_WARNING "You should set the 2nd rightmost bit in "
		       "the first byte of the MAC,\n");
		printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
		       addr[5]);
	}
	return;

random:
	printk(KERN_INFO
	       "Choosing a random ethernet address for device %s\n", dev->name);
	eth_hw_addr_random(dev);
}

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};

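/*
 * Release callback for the backing platform device: undo the
 * transport's per-device state and free both the uml_net wrapper and
 * the netdev itself.
 */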
static void net_device_release(struct device *dev)
{
	struct uml_net *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;
	struct uml_net_private *lp = netdev_priv(netdev);

	if (lp->remove != NULL)
		(*lp->remove)(&lp->user);
	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}

static const struct net_device_ops uml_netdev_ops = {
	.ndo_open		= uml_net_open,
	.ndo_stop		= uml_net_close,
	.ndo_start_xmit		= uml_net_start_xmit,
	.ndo_set_rx_mode	= uml_net_set_multicast_list,
	.ndo_tx_timeout		= uml_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= uml_net_poll_controller,
#endif
};

/*
 * Ensures that platform_driver_register is called only once by
 * eth_configure.  Will be set in an initcall.
 */
static int driver_registered;

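/*
 * Build and register one UML network interface: allocate the netdev,
 * plug in the transport's callbacks, register the backing platform
 * device, and finally register_netdevice() under rtnl_lock.
 */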
static void eth_configure(int n, void *init, char *mac,
			  struct transport *transport, gfp_t gfp_mask)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int err, size;

	size = transport->private_size + sizeof(struct uml_net_private);

	device = kzalloc(sizeof(*device), gfp_mask);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
		       "uml_net\n");
		return;
	}

	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
		       "net_device for eth%d\n", n);
		goto out_free_device;
	}

	INIT_LIST_HEAD(&device->list);
	device->index = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);

	uml_net_setup_etheraddr(dev, mac);

	printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);

	lp = netdev_priv(dev);
	/* This points to the transport private data. It's still clear, but we
	 * must memset it to 0 *now*. Let's help the drivers. */
	memset(lp, 0, size);
	INIT_WORK(&lp->work, uml_dev_close);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = net_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	device->dev = dev;

	/*
	 * These just fill in a data structure, so there's no failure
	 * to be worried about.
	 */
	(*transport->kern->init)(dev, init);

	*lp = ((struct uml_net_private)
		{ .list			= LIST_HEAD_INIT(lp->list),
		  .dev			= dev,
		  .fd			= -1,
		  .mac			= { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .max_packet		= transport->user->max_packet,
		  .protocol		= transport->kern->protocol,
		  .open			= transport->user->open,
		  .close		= transport->user->close,
		  .remove		= transport->user->remove,
		  .read			= transport->kern->read,
		  .write		= transport->kern->write,
		  .add_address		= transport->user->add_address,
		  .delete_address	= transport->user->delete_address });

	spin_lock_init(&lp->lock);
	memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));

	if ((transport->user->init != NULL) &&
	    ((*transport->user->init)(&lp->user, dev) != 0))
		goto out_unregister;

	dev->mtu = transport->user->mtu;
	dev->netdev_ops = &uml_netdev_ops;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	err = update_drop_skb(lp->max_packet);
	if (err)
		goto out_undo_user_init;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	return;

out_undo_user_init:
	if (transport->user->remove != NULL)
		(*transport->user->remove)(&lp->user);
out_unregister:
	platform_device_unregister(&device->pdev);
	return; /* platform_device_unregister frees dev and device */
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}

static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices) {
		device = list_entry(ele, struct uml_net, list);
		if (device->index == n)
			goto out;
	}
	device = NULL;
out:
	spin_unlock(&devices_lock);
	return device;
}

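/*
 * Parse the leading "<n>=" of a device specification, e.g.
 * "0=tuntap,tap0" (an illustrative value), returning the device number
 * and a pointer to the transport portion of the string.
 */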
static int eth_parse(char *str, int *index_out, char **str_out,
		     char **error_out)
{
	char *end;
	int n, err = -EINVAL;

	n = simple_strtoul(str, &end, 0);
	if (end == str) {
		*error_out = "Bad device number";
		return err;
	}

	str = end;
	if (*str != '=') {
		*error_out = "Expected '=' after device number";
		return err;
	}
	str++;

	if (find_device(n)) {
		*error_out = "Device already configured";
		return err;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}

struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);

/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);

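/*
 * Match a device specification against one transport by name prefix.
 * Returns 0 if the transport doesn't match; returns 1 if it does, with
 * *init_out pointing at the parsed setup data (or NULL if allocation
 * or parsing failed).
 */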
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out, gfp_t gfp_mask)
{
	int len;

	len = strlen(transport->name);
	if (strncmp(eth, transport->name, len))
		return 0;

	eth += len;
	if (*eth == ',')
		eth++;
	else if (*eth != '\0')
		return 0;

	*init_out = kmalloc(transport->setup_size, gfp_mask);
	if (*init_out == NULL)
		return 1;

	if (!transport->setup(eth, mac_out, *init_out)) {
		kfree(*init_out);
		*init_out = NULL;
	}
	return 1;
}

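/*
 * Called by each transport driver at init time.  Any command-line
 * devices that were queued on eth_cmd_line before this transport
 * existed are configured here.
 */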
void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	spin_lock(&transports_lock);
	BUG_ON(!list_empty(&new->list));
	list_add(&new->list, &transports);
	spin_unlock(&transports_lock);

	list_for_each_safe(ele, next, &eth_cmd_line) {
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init,
					&mac, GFP_KERNEL);
		if (!match)
			continue;
		else if (init != NULL) {
			eth_configure(eth->index, init, mac, new, GFP_KERNEL);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

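/*
 * Try every registered transport against one device specification;
 * returns nonzero when a transport claimed it.  Called from
 * net_config() when an interface is added at runtime.
 */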
static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;
	int found = 0;

	spin_lock(&transports_lock);
	list_for_each(ele, &transports) {
		transport = list_entry(ele, struct transport, list);
		if (!check_transport(transport, str, index, &init,
				     &mac, GFP_ATOMIC))
			continue;
		if (init != NULL) {
			eth_configure(index, init, mac, transport, GFP_ATOMIC);
			kfree(init);
		}
		found = 1;
		break;
	}

	spin_unlock(&transports_lock);
	return found;
}

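/*
 * Early handler for "eth<n>=<transport>,<options>" on the kernel
 * command line.  Transports haven't registered yet at this point, so
 * the string is only queued on eth_cmd_line for register_transport()
 * to pick up later.
 */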
static int __init eth_setup(char *str)
{
	struct eth_init *new;
	char *error;
	int n, err;

	err = eth_parse(str, &n, &str, &error);
	if (err) {
		printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
		       str, error);
		return 1;
	}

	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
	if (!new)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*new));

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return 1;
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);

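/*
 * mconsole "config" handler: hot-plug a new interface at runtime, e.g.
 * (illustrative invocation)
 *	uml_mconsole <umid> config eth0=tuntap,tap0
 */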
static int net_config(char *str, char **error_out)
{
	int n, err;

	err = eth_parse(str, &n, &str, error_out);
	if (err)
		return err;

	/* This string is broken up and the pieces used by the underlying
	 * driver.  So, it is freed only if eth_setup_common fails.
	 */
	str = kstrdup(str, GFP_KERNEL);
	if (str == NULL) {
		*error_out = "net_config failed to strdup string";
		return -ENOMEM;
	}

	err = !eth_setup_common(str, n);
	if (err)
		kfree(str);

	return err;
}

static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

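/*
 * mconsole "remove" handler: refuse while the device is open
 * (lp->fd > 0); otherwise unregister both the netdev and its platform
 * device, which frees everything via net_device_release().
 */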
static int net_remove(int n, char **error_out)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if (device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = netdev_priv(dev);
	if (lp->fd > 0)
		return -EBUSY;
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	return 0;
}

static struct mc_device net_mc = {
	.list		= LIST_HEAD_INIT(net_mc.list),
	.name		= "eth",
	.config		= net_config,
	.get_config	= NULL,
	.id		= net_id,
	.remove		= net_remove,
};

#ifdef CONFIG_INET
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	if (dev->netdev_ops->ndo_open != uml_net_open)
		return NOTIFY_DONE;

	lp = netdev_priv(dev);

	proc = NULL;
	switch (event) {
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if (proc != NULL) {
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return NOTIFY_DONE;
}

/* uml_net_init shouldn't be called twice on two CPUs at the same time */
static struct notifier_block uml_inetaddr_notifier = {
	.notifier_call		= uml_inetaddr_event,
};

static void inet_register(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/* Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them.  This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if (ip == NULL)
			continue;
		in = ip->ifa_list;
		while (in != NULL) {
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}
	spin_unlock(&opened_lock);
}
#else
static inline void inet_register(void)
{
}
#endif

static int uml_net_init(void)
{
	mconsole_register_dev(&net_mc);
	inet_register();
	return 0;
}

__initcall(uml_net_init);

static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		um_free_irq(lp->dev->irq, lp->dev);
		if ((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if (lp->remove != NULL)
			(*lp->remove)(&lp->user);
	}
	spin_unlock(&opened_lock);
}

__uml_exitcall(close_devices);

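/*
 * Walk a device's IPv4 addresses, invoking cb(address, netmask, arg)
 * for each one.  Exported for the transports' user-side code.
 */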
void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
					void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if (ip == NULL)
		return;
	in = ip->ifa_list;
	while (in != NULL) {
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

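/*
 * Copy out the netmask of the device's first IPv4 address; returns
 * nonzero if the device has no address configured.
 */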
int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__be32 *mask_out = m;

	if (ip == NULL)
		return 1;

	in = ip->ifa_list;
	if (in == NULL)
		return 1;

	*mask_out = in->ifa_mask;
	return 0;
}

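/*
 * One-page scratch buffer for collecting output from host helper
 * processes; pair every get_output_buffer() with free_output_buffer().
 */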
void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if (ret)
		*len_out = PAGE_SIZE;
	else
		*len_out = 0;
	return ret;
}

void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}

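/*
 * Shared parser for the tap-style transports: split a
 * "<device>,<mac>,<gateway>" specification into its fields; returns
 * nonzero if trailing garbage is left over.
 */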
int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if (remain != NULL) {
		printk(KERN_ERR "tap_setup_common - Extra garbage on "
		       "specification : '%s'\n", remain);
		return 1;
	}

	return 0;
}

unsigned short eth_protocol(struct sk_buff *skb)
{
	return eth_type_trans(skb, skb->dev);
}