shaper.c

/*
 *			Simple traffic shaper for Linux NET3.
 *
 *	(c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *				http://www.redhat.com
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *	warranty for any of this software. This material is provided
 *	"AS-IS" and at no charge.
 *
 *
 *	Algorithm:
 *
 *	Queue Frame:
 *		Compute time length of frame at regulated speed
 *		Add frame to queue at appropriate point
 *		Adjust time length computation for follow-up frames
 *		Any frame that falls outside of its boundaries is freed
 *
 *	We work to the following constants
 *
 *		SHAPER_QLEN	Maximum queued frames
 *		SHAPER_LATENCY	Bounding latency on a frame. Leaving this latency
 *				window drops the frame. This stops us queueing
 *				frames for a long time and confusing a remote
 *				host.
 *		SHAPER_MAXSLIP	Maximum time a priority frame may jump forward.
 *				That bounds the penalty we will inflict on low
 *				priority traffic.
 *		SHAPER_BURST	Time range we call "now" in order to reduce
 *				system load. The more we make this the burstier
 *				the behaviour, the better local performance you
 *				get through packet clustering on routers and the
 *				worse the remote end gets to judge rtts.
 *
 *	This is designed to handle lower speed links ( < 200K/second or so). We
 *	run off a 100-150Hz base clock typically. This gives us a resolution at
 *	200Kbit/second of about 2Kbit or 256 bytes (see the worked example
 *	below). Above that our timer resolution may start to cause much more
 *	burstiness in the traffic. We could avoid a lot of that by calling
 *	kick_shaper() at the end of the tied device transmissions. If you run
 *	above about 100K/second you may need to tune the supposed speed rate
 *	for the right values.
 *
 *	BUGS:
 *		Downing the interface under the shaper before the shaper
 *		will render your machine defunct. Don't for now shape over
 *		PPP or SLIP therefore!
 *		This will be fixed in BETA4
 *
 * Update History :
 *
 *		bh_atomic() SMP race fixes and the locking code rewritten to
 *		be SMP safe and irq-mask friendly.
 *		NOTE: we can't use start_bh_atomic() in kick_shaper()
 *		because it's going to be recalled from an irq handler,
 *		and synchronize_bh() is a no-no if called from irq context.
 *						1999  Andrea Arcangeli
 *
 *		Device statistics (tx_packets, tx_bytes,
 *		tx_drops: queue_over_time and collisions: max_queue_exceeded)
 *				1999/06/18 Jordi Murgo <savage@apostols.org>
 *
 *		Use skb->cb for private data.
 *				 2000/03 Andi Kleen
 */
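
/*
 * Worked example of the resolution claim above (illustrative only,
 * assuming HZ=100): at 200 Kbit/s the shaper advances by
 * (200000 / HZ) / 8 = 250 bytes of link time per clock tick, i.e.
 * roughly 2 Kbit per tick, matching the quoted "about 2Kbit or
 * 256 bytes". At higher rates each tick covers proportionally more
 * data, so the clock granularity is what makes the output burstier.
 */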
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_shaper.h>
#include <linux/jiffies.h>

#include <net/dst.h>
#include <net/arp.h>
struct shaper_cb {
	unsigned long	shapeclock;	/* Time it should go out */
	unsigned long	shapestamp;	/* Stamp for shaper */
	__u32		shapelatency;	/* Latency on frame */
	__u32		shapelen;	/* Frame length in clocks */
	__u16		shapepend;	/* Pending */
};
#define SHAPERCB(skb)	((struct shaper_cb *) ((skb)->cb))

static int sh_debug;		/* Debug flag */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"

static void shaper_kick(struct shaper *sh);

/*
 *	Compute clocks on a buffer
 */

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
	int t = skb->len / shaper->bytespertick;
	return t;
}

/*
 *	Set the speed of a shaper. We compute this in bytes per tick since
 *	that's how the machine wants to run. Quoted input is in bits per second
 *	as is traditional (note not BAUD). We assume 8-bit bytes.
 */

static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
	shaper->bitspersec = bitspersec;
	shaper->bytespertick = (bitspersec / HZ) / 8;
	if (!shaper->bytespertick)
		shaper->bytespertick++;
}
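
/*
 * Illustrative numbers only (not part of the driver), assuming HZ=100:
 * shaper_setspeed(sh, 128000) gives bytespertick = (128000/100)/8 = 160,
 * so shaper_clocks() charges a 1500-byte frame 1500/160 = 9 ticks of
 * link time. Speeds low enough to compute to 0 bytes per tick are
 * rounded up to 1, which also avoids a division by zero in
 * shaper_clocks().
 */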

/*
 *	Throw a frame at a shaper.
 */

static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *ptr;

	spin_lock(&shaper->lock);
	ptr = shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency = 0;
	SHAPERCB(skb)->shapeclock = shaper->recovery;
	if (time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock = jiffies;
	skb->priority = 0;		/* short term bug fix */
	SHAPERCB(skb)->shapestamp = jiffies;

	/*
	 *	Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen = shaper_clocks(shaper, skb);

	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for (tmp = skb_peek(&shaper->sendq); tmp != NULL &&
		     tmp != (struct sk_buff *)&shaper->sendq; tmp = tmp->next)
			SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;

		/*
		 *	Queue over time. Spill packet.
		 */
		if (time_after(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_LATENCY)) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}

	if (sh_debug)
		printk("Frame queued.\n");
	if (skb_queue_len(&shaper->sendq) > SHAPER_QLEN) {
		ptr = skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
	return 0;
}
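
/*
 * Sketch of the bookkeeping above with made-up numbers: if recovery is
 * already in the past, a new frame starts from shapeclock = jiffies;
 * with two 9-tick frames already on sendq its shapeclock becomes
 * jiffies + 18. The frame is queued only if that departure time stays
 * within jiffies + SHAPER_LATENCY, otherwise it is freed and counted
 * in tx_dropped; overflowing SHAPER_QLEN instead drops the head of the
 * queue and counts a collision.
 */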

/*
 *	Transmit from a shaper
 */

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
	struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
	if (sh_debug)
		printk("Kick frame on %p\n", newskb);
	if (newskb) {
		newskb->dev = shaper->dev;
		newskb->priority = 2;
		if (sh_debug)
			printk("Kick new frame to %s, %d\n",
				shaper->dev->name, newskb->priority);
		dev_queue_xmit(newskb);

		shaper->stats.tx_bytes += skb->len;
		shaper->stats.tx_packets++;

		if (sh_debug)
			printk("Kicked new frame out.\n");
		dev_kfree_skb(skb);
	}
}

/*
 *	Timer handler for shaping clock
 */

static void shaper_timer(unsigned long data)
{
	struct shaper *shaper = (struct shaper *)data;

	spin_lock(&shaper->lock);
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
}

/*
 *	Kick a shaper queue and try and do something sensible with the
 *	queue.
 */

static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Walk the list (may be empty)
	 */

	while ((skb = skb_peek(&shaper->sendq)) != NULL) {
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */

		if (sh_debug)
			printk("Clock = %ld, jiffies = %ld\n",
				SHAPERCB(skb)->shapeclock, jiffies);
		if (time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST)) {
			/*
			 *	Pull the frame and get interrupts back on.
			 */

			skb_unlink(skb, &shaper->sendq);
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */

			SHAPERCB(skb)->shapepend = 0;
			shaper_queue_xmit(shaper, skb);		/* Fire */
		} else
			break;
	}

	/*
	 *	Next kick.
	 */

	if (skb != NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}
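
/*
 * Example of the recovery logic above (illustrative): a frame kicked
 * with shapeclock T and shapelen 9 pushes recovery to T + 9, so the
 * next frame handed to shaper_start_xmit cannot be scheduled before
 * the link time already committed. If the head frame is not yet due,
 * the timer is simply rearmed for that frame's shapeclock and the
 * queue is left alone.
 */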

/*
 *	Bring the interface up. We just disallow this until a
 *	bind.
 */

static int shaper_open(struct net_device *dev)
{
	struct shaper *shaper = dev->priv;

	/*
	 *	Can't open until attached.
	 *	Also can't open until speed is set, or we'll get
	 *	a division by zero.
	 */

	if (shaper->dev == NULL)
		return -ENODEV;
	if (shaper->bitspersec == 0)
		return -EINVAL;
	return 0;
}

/*
 *	Closing a shaper flushes the queues.
 */

static int shaper_close(struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
		dev_kfree_skb(skb);

	spin_lock_bh(&shaper->lock);
	shaper_kick(shaper);
	spin_unlock_bh(&shaper->lock);

	del_timer_sync(&shaper->timer);
	return 0;
}

/*
 *	Revectored calls. We alter the parameters and call the functions
 *	for our attached device. This enables us to bandwidth allocate after
 *	ARP and other resolutions and not before.
 */

static struct net_device_stats *shaper_get_stats(struct net_device *dev)
{
	struct shaper *sh = dev->priv;
	return &sh->stats;
}

static int shaper_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, void *daddr, void *saddr,
			 unsigned len)
{
	struct shaper *sh = dev->priv;
	int v;

	if (sh_debug)
		printk("Shaper header\n");
	skb->dev = sh->dev;
	v = sh->hard_header(skb, sh->dev, type, daddr, saddr, len);
	skb->dev = dev;
	return v;
}

static int shaper_rebuild_header(struct sk_buff *skb)
{
	struct shaper *sh = skb->dev->priv;
	struct net_device *dev = skb->dev;
	int v;

	if (sh_debug)
		printk("Shaper rebuild header\n");
	skb->dev = sh->dev;
	v = sh->rebuild_header(skb);
	skb->dev = dev;
	return v;
}

#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	struct shaper *sh = neigh->dev->priv;
	struct net_device *tmp;
	int ret;

	if (sh_debug)
		printk("Shaper header cache bind\n");
	tmp = neigh->dev;
	neigh->dev = sh->dev;
	ret = sh->hard_header_cache(neigh, hh);
	neigh->dev = tmp;
	return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
				unsigned char *haddr)
{
	struct shaper *sh = dev->priv;

	if (sh_debug)
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
}
#endif

#ifdef CONFIG_INET

static int shaper_neigh_setup(struct neighbour *n)
{
#ifdef CONFIG_INET
	if (n->nud_state == NUD_NONE) {
		n->ops = &arp_broken_ops;
		n->output = n->ops->output;
	}
#endif
	return 0;
}

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
#ifdef CONFIG_INET
	if (p->tbl->family == AF_INET) {
		p->neigh_setup = shaper_neigh_setup;
		p->ucast_probes = 0;
		p->mcast_probes = 0;
	}
#endif
	return 0;
}

#else /* !(CONFIG_INET) */

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
	return 0;
}

#endif

static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
	sh->dev = dev;
	sh->hard_start_xmit = dev->hard_start_xmit;
	sh->get_stats = dev->get_stats;
	if (dev->hard_header) {
		sh->hard_header = dev->hard_header;
		shdev->hard_header = shaper_header;
	} else
		shdev->hard_header = NULL;

	if (dev->rebuild_header) {
		sh->rebuild_header = dev->rebuild_header;
		shdev->rebuild_header = shaper_rebuild_header;
	} else
		shdev->rebuild_header = NULL;

#if 0
	if (dev->hard_header_cache) {
		sh->hard_header_cache = dev->hard_header_cache;
		shdev->hard_header_cache = shaper_cache;
	} else {
		shdev->hard_header_cache = NULL;
	}

	if (dev->header_cache_update) {
		sh->header_cache_update = dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;
	} else
		shdev->header_cache_update = NULL;
#else
	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;
#endif
	shdev->neigh_setup = shaper_neigh_setup_dev;

	shdev->hard_header_len = dev->hard_header_len;
	shdev->type = dev->type;
	shdev->addr_len = dev->addr_len;
	shdev->mtu = dev->mtu;
	sh->bitspersec = 0;
	return 0;
}

static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct shaperconf *ss = (struct shaperconf *)&ifr->ifr_ifru;
	struct shaper *sh = dev->priv;

	if (ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
	}

	switch (ss->ss_cmd) {
	case SHAPER_SET_DEV:
	{
		struct net_device *them = __dev_get_by_name(ss->ss_name);
		if (them == NULL)
			return -ENODEV;
		if (sh->dev)
			return -EBUSY;
		return shaper_attach(dev, dev->priv, them);
	}
	case SHAPER_GET_DEV:
		if (sh->dev == NULL)
			return -ENODEV;
		strcpy(ss->ss_name, sh->dev->name);
		return 0;
	case SHAPER_SET_SPEED:
		shaper_setspeed(sh, ss->ss_speed);
		return 0;
	case SHAPER_GET_SPEED:
		ss->ss_speed = sh->bitspersec;
		return 0;
	default:
		return -EINVAL;
	}
}
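
/*
 * The commands above are issued from user space through the device's
 * private ioctl path, with a struct shaperconf overlaid on ifr_ifru.
 * A minimal, hypothetical configuration sketch follows (error handling
 * omitted; <sys/ioctl.h>, <net/if.h> and <linux/if_shaper.h> assumed;
 * the historical shapecfg utility is the reference tool and may differ
 * in detail, e.g. in the exact ioctl request number used):
 *
 *	struct ifreq ifr;
 *	struct shaperconf *sc = (struct shaperconf *)&ifr.ifr_ifru;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "shaper0", IFNAMSIZ);
 *	sc->ss_cmd = SHAPER_SET_DEV;		// attach to a real device
 *	strcpy(sc->ss_name, "eth0");
 *	ioctl(fd, SIOCDEVPRIVATE, &ifr);	// assumed request number
 *
 *	sc->ss_cmd = SHAPER_SET_SPEED;		// then set the rate
 *	sc->ss_speed = 128000;			// bits per second
 *	ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 * After both calls succeed, shaper0 can be brought up and routed over
 * like any other interface.
 */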

static void shaper_init_priv(struct net_device *dev)
{
	struct shaper *sh = dev->priv;

	skb_queue_head_init(&sh->sendq);
	init_timer(&sh->timer);
	sh->timer.function = shaper_timer;
	sh->timer.data = (unsigned long)sh;
	spin_lock_init(&sh->lock);
}

/*
 *	Add a shaper device to the system
 */

static void __init shaper_setup(struct net_device *dev)
{
	/*
	 *	Set up the shaper.
	 */

	SET_MODULE_OWNER(dev);
	shaper_init_priv(dev);

	dev->open		= shaper_open;
	dev->stop		= shaper_close;
	dev->hard_start_xmit	= shaper_start_xmit;
	dev->get_stats		= shaper_get_stats;
	dev->set_multicast_list	= NULL;

	/*
	 *	Initialise the packet queues
	 */

	/*
	 *	Handlers for when we attach to a device.
	 */

	dev->hard_header	= shaper_header;
	dev->rebuild_header	= shaper_rebuild_header;
#if 0
	dev->hard_header_cache	= shaper_cache;
	dev->header_cache_update = shaper_cache_update;
#endif
	dev->neigh_setup	= shaper_neigh_setup_dev;
	dev->do_ioctl		= shaper_ioctl;
	dev->hard_header_len	= 0;
	dev->type		= ARPHRD_ETHER;	/* initially */
	dev->set_mac_address	= NULL;
	dev->mtu		= 1500;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;
	dev->flags		= 0;
}

static int shapers = 1;
#ifdef MODULE

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

static int __init set_num_shapers(char *str)
{
	shapers = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */
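
/*
 * Usage note (illustrative): the number of shaper devices is fixed at
 * load time, e.g. "modprobe shaper shapers=4" when built as a module,
 * or "shapers=4" on the kernel command line when built in. The default
 * is a single device, shaper0.
 */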

static struct net_device **devs;

static unsigned int shapers_registered = 0;

static int __init shaper_init(void)
{
	int i;
	size_t alloc_size;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (shapers < 1)
		return -ENODEV;

	alloc_size = sizeof(*devs) * shapers;	/* one pointer slot per shaper */
	devs = kmalloc(alloc_size, GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	memset(devs, 0, alloc_size);

	for (i = 0; i < shapers; i++) {
		snprintf(name, IFNAMSIZ, "shaper%d", i);
		dev = alloc_netdev(sizeof(struct shaper), name,
				   shaper_setup);
		if (!dev)
			break;

		if (register_netdev(dev)) {
			free_netdev(dev);
			break;
		}

		devs[i] = dev;
		shapers_registered++;
	}

	if (!shapers_registered) {
		kfree(devs);
		devs = NULL;
	}

	return (shapers_registered ? 0 : -ENODEV);
}

static void __exit shaper_exit(void)
{
	int i;

	for (i = 0; i < shapers_registered; i++) {
		if (devs[i]) {
			unregister_netdev(devs[i]);
			free_netdev(devs[i]);
		}
	}

	kfree(devs);
	devs = NULL;
}

module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");