hwspinlock-uclass.c

// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * Copyright (C) 2018, STMicroelectronics - All Rights Reserved
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <hwspinlock.h>
#include <dm/device-internal.h>

/* gd is referenced in the CONFIG_NEEDS_MANUAL_RELOC path of post_bind(). */
DECLARE_GLOBAL_DATA_PTR;

static inline const struct hwspinlock_ops *
hwspinlock_dev_ops(struct udevice *dev)
{
        return (const struct hwspinlock_ops *)dev->driver->ops;
}

static int hwspinlock_of_xlate_default(struct hwspinlock *hws,
                                       struct ofnode_phandle_args *args)
{
        if (args->args_count > 1) {
                debug("Invalid args_count: %d\n", args->args_count);
                return -EINVAL;
        }

        if (args->args_count)
                hws->id = args->args[0];
        else
                hws->id = 0;

        return 0;
}
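
/*
 * Illustrative device tree layout assumed by the default xlate above and by
 * hwspinlock_get_by_index() below; node names and the lock index are made up:
 *
 *      hsem: hwspinlock {
 *              #hwlock-cells = <1>;
 *      };
 *
 *      consumer {
 *              hwlocks = <&hsem 5>;
 *      };
 *
 * With the default xlate, the single cell after the phandle (5 here) becomes
 * hws->id; with no cell, the id defaults to 0.
 */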

int hwspinlock_get_by_index(struct udevice *dev, int index,
                            struct hwspinlock *hws)
{
        int ret;
        struct ofnode_phandle_args args;
        struct udevice *dev_hws;
        const struct hwspinlock_ops *ops;

        assert(hws);
        hws->dev = NULL;

        ret = dev_read_phandle_with_args(dev, "hwlocks", "#hwlock-cells", 1,
                                         index, &args);
        if (ret) {
                dev_dbg(dev, "%s: dev_read_phandle_with_args: err=%d\n",
                        __func__, ret);
                return ret;
        }

        ret = uclass_get_device_by_ofnode(UCLASS_HWSPINLOCK,
                                          args.node, &dev_hws);
        if (ret) {
                dev_dbg(dev,
                        "%s: uclass_get_device_by_ofnode failed: err=%d\n",
                        __func__, ret);
                return ret;
        }

        hws->dev = dev_hws;

        ops = hwspinlock_dev_ops(dev_hws);

        if (ops->of_xlate)
                ret = ops->of_xlate(hws, &args);
        else
                ret = hwspinlock_of_xlate_default(hws, &args);
        if (ret)
                dev_dbg(dev, "of_xlate() failed: %d\n", ret);

        return ret;
}
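
/*
 * Sketch of the expected call sequence in a client driver (hypothetical,
 * not taken from this file): the handle filled by hwspinlock_get_by_index()
 * is then passed to the lock/unlock helpers below, with the timeout given
 * in milliseconds (measured via get_timer()).
 *
 *      struct hwspinlock hws;
 *
 *      ret = hwspinlock_get_by_index(dev, 0, &hws);
 *      if (ret)
 *              return ret;
 *      ret = hwspinlock_lock_timeout(&hws, 100);
 *      if (ret)
 *              return ret;
 *      ... access the shared resource ...
 *      hwspinlock_unlock(&hws);
 */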

int hwspinlock_lock_timeout(struct hwspinlock *hws, unsigned int timeout)
{
        const struct hwspinlock_ops *ops;
        ulong start;
        int ret;

        assert(hws);

        if (!hws->dev)
                return -EINVAL;

        ops = hwspinlock_dev_ops(hws->dev);
        if (!ops->lock)
                return -ENOSYS;

        start = get_timer(0);
        do {
                ret = ops->lock(hws->dev, hws->id);
                if (!ret)
                        return ret;

                if (ops->relax)
                        ops->relax(hws->dev);
        } while (get_timer(start) < timeout);

        return -ETIMEDOUT;
}
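
/*
 * Unlocking does not retry or time out: it is a single call into the
 * hardware driver's unlock op for the lock identified by hws->id.
 */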

int hwspinlock_unlock(struct hwspinlock *hws)
{
        const struct hwspinlock_ops *ops;

        assert(hws);

        if (!hws->dev)
                return -EINVAL;

        ops = hwspinlock_dev_ops(hws->dev);
        if (!ops->unlock)
                return -ENOSYS;

        return ops->unlock(hws->dev, hws->id);
}
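
/*
 * With CONFIG_NEEDS_MANUAL_RELOC the ops function pointers still hold their
 * pre-relocation addresses, so they are shifted by gd->reloc_off here once;
 * the static reloc_done flag prevents the shared ops structure from being
 * adjusted a second time when several devices bind against the same driver.
 */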

static int hwspinlock_post_bind(struct udevice *dev)
{
#if defined(CONFIG_NEEDS_MANUAL_RELOC)
        struct hwspinlock_ops *ops = device_get_ops(dev);
        static int reloc_done;

        if (!reloc_done) {
                if (ops->lock)
                        ops->lock += gd->reloc_off;
                if (ops->unlock)
                        ops->unlock += gd->reloc_off;
                if (ops->relax)
                        ops->relax += gd->reloc_off;

                reloc_done++;
        }
#endif
        return 0;
}

UCLASS_DRIVER(hwspinlock) = {
        .id = UCLASS_HWSPINLOCK,
        .name = "hwspinlock",
        .post_bind = hwspinlock_post_bind,
};
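
/*
 * A hardware-specific driver hooks into this uclass by supplying a
 * struct hwspinlock_ops and a U_BOOT_DRIVER entry. Minimal sketch with
 * hypothetical names (foo_hwspinlock_*); .lock and .unlock back the helpers
 * above, while .relax and .of_xlate are optional:
 *
 *      static const struct hwspinlock_ops foo_hwspinlock_ops = {
 *              .lock = foo_hwspinlock_lock,
 *              .unlock = foo_hwspinlock_unlock,
 *      };
 *
 *      static const struct udevice_id foo_hwspinlock_ids[] = {
 *              { .compatible = "foo,hwspinlock" },
 *              { }
 *      };
 *
 *      U_BOOT_DRIVER(foo_hwspinlock) = {
 *              .name = "foo_hwspinlock",
 *              .id = UCLASS_HWSPINLOCK,
 *              .of_match = foo_hwspinlock_ids,
 *              .ops = &foo_hwspinlock_ops,
 *      };
 */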