// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/etherdevice.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.name	= "none",
	.proto	= DSA_TAG_PROTO_NONE,
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

DSA_TAG_DRIVER(none_ops);
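
/*
 * The "none" tagger is the fallback for switches that cannot insert a
 * frame tag: transmit is a pass-through, and with no .rcv hook received
 * frames are not demultiplexed onto user ports.
 */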

static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
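
/*
 * Tag protocol drivers normally do not call the register/unregister
 * helpers above directly. A tagger module typically wraps its
 * dsa_device_ops in the module_dsa_tag_driver() helper; a minimal sketch
 * (all "foo" names are illustrative):
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name	= "foo",
 *		.proto	= DSA_TAG_PROTO_FOO,
 *		.xmit	= foo_tag_xmit,
 *		.rcv	= foo_tag_rcv,
 *		.overhead = FOO_HDR_LEN,
 *	};
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *	module_dsa_tag_driver(foo_netdev_ops);
 */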

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}
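
/*
 * dsa_tag_driver_get() and dsa_tag_driver_put() pair up: the get loads
 * the tagger module if needed and takes a reference on it, the put drops
 * that reference once the returned ops are no longer in use. Roughly:
 *
 *	tag_ops = dsa_tag_driver_get(tag_protocol);
 *	if (IS_ERR(tag_ops))
 *		return PTR_ERR(tag_ops);
 *	...
 *	dsa_tag_driver_put(tag_ops);
 */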

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);
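
/*
 * Note that dsa_dev_to_net_device() returns the net_device with a
 * reference held (dev_hold()); the caller is expected to drop it with
 * dev_put() when done:
 *
 *	master = dsa_dev_to_net_device(dev);
 *	...
 *	dev_put(master);
 */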

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}
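
/*
 * Receive path for frames coming in on the CPU port's master interface:
 * the tagger's rcv() hook strips the switch tag and maps the frame to the
 * right slave netdevice, then the frame is optionally untagged from the
 * bridge PVID, accounted, and handed to the stack through GRO cells,
 * unless delivery is deferred while waiting for an RX timestamp.
 */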

static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct pcpu_sw_netstats *s;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev, pt);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	p = netdev_priv(skb->dev);
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}
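
/*
 * System sleep handling: suspend quiesces the user (slave) ports before
 * calling the switch driver's ->suspend(), and resume brings the switch
 * back up via ->resume() before the slave ports are resumed.
 */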

#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
	const struct dsa_port *dp = dsa_to_port(ds, p);

	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	int i, ret = 0;

	/* Suspend slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	int i, ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;
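
/*
 * dsa_owq is an ordered, WQ_MEM_RECLAIM workqueue allocated in
 * dsa_init_module(); dsa_schedule_work() queues deferred items (such as
 * switchdev FDB work from the slave path) on it so they run one at a
 * time, in submission order.
 */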

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);

int register_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_dsa_notifier);

int unregister_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_dsa_notifier);
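
/*
 * Outside subscribers hook into the chain with a standard notifier_block;
 * a minimal sketch (the "foo" handler is illustrative):
 *
 *	static int foo_dsa_event(struct notifier_block *nb,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct dsa_notifier_info *info = ptr;
 *
 *		netdev_dbg(info->dev, "DSA event %lu\n", event);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_dsa_nb = {
 *		.notifier_call = foo_dsa_event,
 *	};
 *
 *	register_dsa_notifier(&foo_dsa_nb);
 */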

int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);
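
/*
 * The dsa_devlink_* helpers below are thin wrappers around the devlink
 * core, keyed off ds->devlink (and dp->devlink_port for the per-port
 * region), so switch drivers can expose devlink params, resources and
 * regions without dereferencing the devlink instance themselves.
 */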

int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);

int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count)
{
	return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count)
{
	devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params)
{
	return devlink_resource_register(ds->devlink, resource_name,
					 resource_size, resource_id,
					 parent_resource_id,
					 size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
	devlink_resources_unregister(ds->devlink, NULL);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv)
{
	return devlink_resource_occ_get_register(ds->devlink, resource_id,
						 occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id)
{
	devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);

struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size)
{
	return devlink_region_create(ds->devlink, ops, region_max_snapshots,
				     region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);

struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	return devlink_port_region_create(&dp->devlink_port, ops,
					  region_max_snapshots,
					  region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);

void dsa_devlink_region_destroy(struct devlink_region *region)
{
	devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	return 0;

register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");