bundle.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus bundles
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/greybus.h>
#include "greybus_trace.h"

static ssize_t bundle_class_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);

static ssize_t bundle_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	if (!bundle->state)
		return sprintf(buf, "\n");

	return sprintf(buf, "%s\n", bundle->state);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	kfree(bundle->state);
	bundle->state = kstrdup(buf, GFP_KERNEL);
	if (!bundle->state)
		return -ENOMEM;

	/* Tell userspace that the file contents changed */
	sysfs_notify(&bundle->dev.kobj, NULL, "state");

	return size;
}
static DEVICE_ATTR_RW(state);

static struct attribute *bundle_attrs[] = {
	&dev_attr_bundle_class.attr,
	&dev_attr_bundle_id.attr,
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(bundle);
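
/*
 * Note (not in the original file): the attributes above surface under the
 * bundle's device directory on the greybus bus in sysfs, i.e. something
 * like /sys/bus/greybus/devices/<interface-name>.<bundle-id>/bundle_class;
 * the exact parent name depends on how the interface device was named.
 * "state" is a free-form string userspace may write, and the sysfs_notify()
 * call above wakes up any process poll()ing that file.
 */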

static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
					u8 bundle_id)
{
	struct gb_bundle *bundle;

	list_for_each_entry(bundle, &intf->bundles, links) {
		if (bundle->id == bundle_id)
			return bundle;
	}

	return NULL;
}

static void gb_bundle_release(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	trace_gb_bundle_release(bundle);

	kfree(bundle->state);
	kfree(bundle->cport_desc);
	kfree(bundle);
}

#ifdef CONFIG_PM
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_disable(connection);
}

static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_enable(connection);
}

static int gb_bundle_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	if (pm && pm->runtime_suspend) {
		ret = pm->runtime_suspend(&bundle->dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_disable_all_connections(bundle);
	}

	ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
	if (ret) {
		if (pm && pm->runtime_resume)
			ret = pm->runtime_resume(dev);
		else
			gb_bundle_enable_all_connections(bundle);

		return ret;
	}

	return 0;
}

static int gb_bundle_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
	if (ret)
		return ret;

	if (pm && pm->runtime_resume) {
		ret = pm->runtime_resume(dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_enable_all_connections(bundle);
	}

	return 0;
}

static int gb_bundle_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_bundle_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};

struct device_type greybus_bundle_type = {
	.name		= "greybus_bundle",
	.release	= gb_bundle_release,
	.pm		= &gb_bundle_pm_ops,
};
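
/*
 * Illustrative sketch (not part of the original file): gb_bundle_suspend()
 * and gb_bundle_resume() above prefer the runtime PM callbacks of the bound
 * class driver and only fall back to disabling/enabling every connection of
 * the bundle when the driver supplies none.  A bundle driver could opt in
 * roughly as follows; every "gb_foo_*" name here is hypothetical:
 *
 *	static const struct dev_pm_ops gb_foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(gb_foo_runtime_suspend,
 *				   gb_foo_runtime_resume, NULL)
 *	};
 *
 *	static struct greybus_driver gb_foo_driver = {
 *		.name		= "foo",
 *		.probe		= gb_foo_probe,
 *		.disconnect	= gb_foo_disconnect,
 *		.id_table	= gb_foo_id_table,
 *		.driver.pm	= &gb_foo_pm_ops,
 *	};
 */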

/*
 * Create a gb_bundle structure to represent a discovered
 * bundle.  Returns a pointer to the new bundle, or a null
 * pointer on failure (invalid or duplicate bundle id, or
 * memory exhaustion).
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class)
{
	struct gb_bundle *bundle;

	if (bundle_id == BUNDLE_ID_NONE) {
		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
		return NULL;
	}

	/*
	 * Reject any attempt to reuse a bundle id.  We initialize
	 * these serially, so there's no need to worry about keeping
	 * the interface bundle list locked here.
	 */
	if (gb_bundle_find(intf, bundle_id)) {
		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
		return NULL;
	}

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->intf = intf;
	bundle->id = bundle_id;
	bundle->class = class;
	INIT_LIST_HEAD(&bundle->connections);

	bundle->dev.parent = &intf->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	bundle->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

	list_add(&bundle->links, &intf->bundles);

	trace_gb_bundle_create(bundle);

	return bundle;
}

int gb_bundle_add(struct gb_bundle *bundle)
{
	int ret;

	ret = device_add(&bundle->dev);
	if (ret) {
		dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
		return ret;
	}

	trace_gb_bundle_add(bundle);

	return 0;
}

/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
	trace_gb_bundle_destroy(bundle);

	if (device_is_registered(&bundle->dev))
		device_del(&bundle->dev);

	list_del(&bundle->links);

	put_device(&bundle->dev);
}
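
/*
 * Illustrative sketch (not part of the original file): callers in the
 * interface/manifest code are expected to drive the helpers above roughly
 * like this (the local variables shown are hypothetical):
 *
 *	bundle = gb_bundle_create(intf, bundle_id, class);
 *	if (!bundle)
 *		return -ENOMEM;
 *
 *	ret = gb_bundle_add(bundle);
 *	if (ret) {
 *		gb_bundle_destroy(bundle);
 *		return ret;
 *	}
 *
 * On teardown, gb_bundle_destroy() unregisters the device (only if it was
 * actually added), unlinks the bundle from intf->bundles and drops this
 * code's reference; once the last reference is gone, gb_bundle_release()
 * runs and frees the memory.
 */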