// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/bitfield.h>	/* FIELD_GET()/FIELD_PREP() */
#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)
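
/*
 * All offsets above are relative to the MMIO base of the FME global error
 * reporting private feature (FME_FEATURE_ID_GLOBAL_ERR), which every
 * handler below looks up via dfl_get_feature_ioaddr_by_id().  Each *_MASK
 * register gates reporting of the error-status register that follows it.
 */
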
static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
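
/*
 * Writing to pcie0_errors clears latched errors: the caller must write
 * back exactly the value currently latched in the register.  All error
 * bits are masked for the duration of the clear so that no new event is
 * reported half-way through, then reporting is unmasked again.
 */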
static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
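
/*
 * Example from userspace (the sysfs path is an assumption; the attribute
 * group below is named "errors" under the FME platform device):
 *
 *   $ cat /sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors
 *   0x1
 *   $ echo 0x1 > /sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors
 *
 * Echoing a value other than the one currently latched fails with -EINVAL.
 */
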
static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);
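
/*
 * The RAS non-fatal and catastrophic/fatal error registers are exposed
 * read-only: they are plain snapshots and, unlike the pcie0/pcie1/fme
 * attributes, have no sysfs write path for clearing.
 */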
static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);
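
/*
 * inject_errors drives the low three bits of RAS_ERROR_INJECT
 * (INJECT_ERROR_MASK); a value with any higher bit set is rejected.
 * The store handler does a read-modify-write so the rest of the
 * register is preserved.
 */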
static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);
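
/*
 * fme_errors follows the same write-back-to-clear protocol as the PCIe
 * error attributes, with one twist: when the feature revision is 0,
 * MBP_ERROR is left masked after the clear instead of unmasking all
 * bits (hardware workaround, see the comment in fme_errors_store()).
 */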
static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: disable MBP_ERROR if feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);
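
/*
 * FME_FIRST_ERROR latches information about the first error event seen
 * by the FME, and FME_NEXT_ERROR the one that followed, so tooling can
 * reconstruct the error order.  Both are exposed read-only.
 */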
static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature
	 * is enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name = "errors",
	.attrs = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};
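
/*
 * fme_err_mask() masks (on teardown) or unmasks (on init) every error
 * reporting register in one go.  Note the revision-0 workaround again:
 * unmasking leaves MBP_ERROR set in FME_ERROR_MASK so that bit stays
 * suppressed on old hardware.
 */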
static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}

static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

static long
fme_global_error_ioctl(struct platform_device *pdev,
		       struct dfl_feature *feature,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_FME_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
		return -ENODEV;
	}
}
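
/*
 * Userspace reaches the two ioctls above through the FME character
 * device.  A minimal sketch (the /dev path is an assumption; error
 * handling omitted):
 *
 *	int fd = open("/dev/dfl-fme.0", O_RDWR);
 *	__u32 num_irqs;
 *	ioctl(fd, DFL_FPGA_FME_ERR_GET_IRQ_NUM, &num_irqs);
 *	struct dfl_fpga_irq_set *irq_set = ...;	// eventfds to bind
 *	ioctl(fd, DFL_FPGA_FME_ERR_SET_IRQ, irq_set);
 */
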
const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
	.ioctl = fme_global_error_ioctl,
};