// SPDX-License-Identifier: GPL-2.0-only
/*
 * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs
 *
 * Copyright (c) 2017 Broadcom
 */

/*
 * This driver provides access to the DPFE interface of Broadcom STB SoCs.
 * The firmware running on the DCPU inside the DDR PHY can provide current
 * information about the system's RAM, for instance the DRAM refresh rate.
 * This can be used as an indirect indicator for the DRAM's temperature.
 * Slower refresh rate means cooler RAM, higher refresh rate means hotter
 * RAM.
 *
 * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which
 * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls.
 *
 * Note regarding the loading of the firmware image: we use be32_to_cpu()
 * and le32_to_cpu(), so we can support the following four cases:
 *     - LE kernel + LE firmware image (the most common case)
 *     - LE kernel + BE firmware image
 *     - BE kernel + LE firmware image
 *     - BE kernel + BE firmware image
 *
 * The DCPU always runs in big endian mode. The firmware image, however, can
 * be in either format. Also, communication between host CPU and DCPU is
 * always in little endian.
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define DRVNAME			"brcmstb-dpfe"

/* DCPU register offsets */
#define REG_DCPU_RESET		0x0
#define REG_TO_DCPU_MBOX	0x10
#define REG_TO_HOST_MBOX	0x14

/* Macros to process offsets returned by the DCPU */
#define DRAM_MSG_ADDR_OFFSET	0x0
#define DRAM_MSG_TYPE_OFFSET	0x1c
#define DRAM_MSG_ADDR_MASK	((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
#define DRAM_MSG_TYPE_MASK	((1UL << \
				 (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)

/* Message RAM */
#define DCPU_MSG_RAM_START	0x100
#define DCPU_MSG_RAM(x)		(DCPU_MSG_RAM_START + (x) * sizeof(u32))

/* DRAM Info Offsets & Masks */
#define DRAM_INFO_INTERVAL	0x0
#define DRAM_INFO_MR4		0x4
#define DRAM_INFO_ERROR		0x8
#define DRAM_INFO_MR4_MASK	0xff
#define DRAM_INFO_MR4_SHIFT	24	/* We need to look at byte 3 */

/* DRAM MR4 Offsets & Masks */
#define DRAM_MR4_REFRESH	0x0	/* Refresh rate */
#define DRAM_MR4_SR_ABORT	0x3	/* Self Refresh Abort */
#define DRAM_MR4_PPRE		0x4	/* Post-package repair entry/exit */
#define DRAM_MR4_TH_OFFS	0x5	/* Thermal Offset; vendor specific */
#define DRAM_MR4_TUF		0x7	/* Temperature Update Flag */

#define DRAM_MR4_REFRESH_MASK	0x7
#define DRAM_MR4_SR_ABORT_MASK	0x1
#define DRAM_MR4_PPRE_MASK	0x1
#define DRAM_MR4_TH_OFFS_MASK	0x3
#define DRAM_MR4_TUF_MASK	0x1

/* DRAM Vendor Offsets & Masks (API v2) */
#define DRAM_VENDOR_MR5		0x0
#define DRAM_VENDOR_MR6		0x4
#define DRAM_VENDOR_MR7		0x8
#define DRAM_VENDOR_MR8		0xc
#define DRAM_VENDOR_ERROR	0x10
#define DRAM_VENDOR_MASK	0xff
#define DRAM_VENDOR_SHIFT	24	/* We need to look at byte 3 */

/* DRAM Information Offsets & Masks (API v3) */
#define DRAM_DDR_INFO_MR4	0x0
#define DRAM_DDR_INFO_MR5	0x4
#define DRAM_DDR_INFO_MR6	0x8
#define DRAM_DDR_INFO_MR7	0xc
#define DRAM_DDR_INFO_MR8	0x10
#define DRAM_DDR_INFO_ERROR	0x14
#define DRAM_DDR_INFO_MASK	0xff

/* Reset register bits & masks */
#define DCPU_RESET_SHIFT	0x0
#define DCPU_RESET_MASK		0x1
#define DCPU_CLK_DISABLE_SHIFT	0x2

/* DCPU return codes */
#define DCPU_RET_ERROR_BIT	BIT(31)
#define DCPU_RET_SUCCESS	0x1
#define DCPU_RET_ERR_HEADER	(DCPU_RET_ERROR_BIT | BIT(0))
#define DCPU_RET_ERR_INVAL	(DCPU_RET_ERROR_BIT | BIT(1))
#define DCPU_RET_ERR_CHKSUM	(DCPU_RET_ERROR_BIT | BIT(2))
#define DCPU_RET_ERR_COMMAND	(DCPU_RET_ERROR_BIT | BIT(3))
/* This error code is not firmware defined and only used in the driver. */
#define DCPU_RET_ERR_TIMEDOUT	(DCPU_RET_ERROR_BIT | BIT(4))

/* Firmware magic */
#define DPFE_BE_MAGIC		0xfe1010fe
#define DPFE_LE_MAGIC		0xfe0101fe

/* Error codes */
#define ERR_INVALID_MAGIC	-1
#define ERR_INVALID_SIZE	-2
#define ERR_INVALID_CHKSUM	-3

/* Message types */
#define DPFE_MSG_TYPE_COMMAND	1
#define DPFE_MSG_TYPE_RESPONSE	2

#define DELAY_LOOP_MAX		1000

enum dpfe_msg_fields {
	MSG_HEADER,
	MSG_COMMAND,
	MSG_ARG_COUNT,
	MSG_ARG0,
	MSG_FIELD_MAX	= 16 /* Max number of arguments */
};

enum dpfe_commands {
	DPFE_CMD_GET_INFO,
	DPFE_CMD_GET_REFRESH,
	DPFE_CMD_GET_VENDOR,
	DPFE_CMD_MAX /* Last entry */
};

/*
 * Format of the binary firmware file:
 *
 *   entry
 *      0    header
 *              value:  0xfe0101fe  <== little endian
 *                      0xfe1010fe  <== big endian
 *      1    sequence:
 *              [31:16] total segments on this build
 *              [15:0]  this segment sequence.
 *      2    FW version
 *      3    IMEM byte size
 *      4    DMEM byte size
 *           IMEM
 *           DMEM
 *      last checksum ==> sum of everything
 */
struct dpfe_firmware_header {
	u32 magic;
	u32 sequence;
	u32 version;
	u32 imem_size;
	u32 dmem_size;
};

/* Things we only need during initialization. */
struct init_data {
	unsigned int dmem_len;
	unsigned int imem_len;
	unsigned int chksum;
	bool is_big_endian;
};

/* API version and corresponding commands */
struct dpfe_api {
	int version;
	const char *fw_name;
	const struct attribute_group **sysfs_attrs;
	u32 command[DPFE_CMD_MAX][MSG_FIELD_MAX];
};

/* Things we need for as long as we are active. */
struct brcmstb_dpfe_priv {
	void __iomem *regs;
	void __iomem *dmem;
	void __iomem *imem;
	struct device *dev;
	const struct dpfe_api *dpfe_api;
	struct mutex lock;
};

/*
 * Forward declaration of our sysfs attribute functions, so we can declare the
 * attribute data structures early.
 */
static ssize_t show_info(struct device *, struct device_attribute *, char *);
static ssize_t show_refresh(struct device *, struct device_attribute *, char *);
static ssize_t store_refresh(struct device *, struct device_attribute *,
			     const char *, size_t);
static ssize_t show_vendor(struct device *, struct device_attribute *, char *);
static ssize_t show_dram(struct device *, struct device_attribute *, char *);

/*
 * Declare our attributes early, so they can be referenced in the API data
 * structure. We need to do this, because the attributes depend on the API
 * version.
 */
static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
static DEVICE_ATTR(dpfe_dram, 0444, show_dram, NULL);
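
/*
 * These attributes appear under the platform device's sysfs directory once
 * sysfs_create_groups() runs in probe, e.g. (the exact path depends on the
 * device tree node name):
 *
 *   cat /sys/devices/platform/<dpfe-node>/dpfe_refresh
 */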

/* API v2 sysfs attributes */
static struct attribute *dpfe_v2_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_refresh.attr,
	&dev_attr_dpfe_vendor.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v2);

/* API v3 sysfs attributes */
static struct attribute *dpfe_v3_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_dram.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v3);

/*
 * Old API v2 firmware commands, as defined in the rev 0.61 specification. We
 * use a version set to 1 to denote that it is not compatible with the new
 * API v2 and onwards.
 */
static const struct dpfe_api dpfe_api_old_v2 = {
	.version = 1,
	.fw_name = "dpfe.bin",
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 1,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 2,
		},
	}
};

/*
 * API v2 firmware commands, as defined in the rev 0.8 specification, named
 * "new v2" here
 */
static const struct dpfe_api dpfe_api_new_v2 = {
	.version = 2,
	.fw_name = NULL, /* We expect the firmware to have been downloaded! */
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x101,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x201,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x202,
		},
	}
};

/* API v3 firmware commands */
static const struct dpfe_api dpfe_api_v3 = {
	.version = 3,
	.fw_name = NULL, /* We expect the firmware to have been downloaded! */
	.sysfs_attrs = dpfe_v3_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0101,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0202,
			[MSG_ARG_COUNT] = 0,
		},
		/* There's no GET_VENDOR command in API v3. */
	},
};
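
/*
 * Translate a DCPU error bit position (1-based, as produced by ffs() on the
 * return code with the error bit cleared) into a human-readable string.
 * Out-of-range indices map to "Unknown error".
 */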
static const char *get_error_text(unsigned int i)
{
	static const char * const error_text[] = {
		"Success", "Header code incorrect",
		"Unknown command or argument", "Incorrect checksum",
		"Malformed command", "Timed out", "Unknown error",
	};

	if (unlikely(i >= ARRAY_SIZE(error_text)))
		i = ARRAY_SIZE(error_text) - 1;

	return error_text[i];
}
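
/* Return true if the DCPU is running, i.e. not currently held in reset. */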
static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
{
	u32 val;

	mutex_lock(&priv->lock);
	val = readl_relaxed(priv->regs + REG_DCPU_RESET);
	mutex_unlock(&priv->lock);

	return !(val & DCPU_RESET_MASK);
}

static void __disable_dcpu(struct brcmstb_dpfe_priv *priv)
{
	u32 val;

	if (!is_dcpu_enabled(priv))
		return;

	mutex_lock(&priv->lock);

	/* Put DCPU in reset if it's running. */
	val = readl_relaxed(priv->regs + REG_DCPU_RESET);
	val |= (1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, priv->regs + REG_DCPU_RESET);

	mutex_unlock(&priv->lock);
}

static void __enable_dcpu(struct brcmstb_dpfe_priv *priv)
{
	void __iomem *regs = priv->regs;
	u32 val;

	mutex_lock(&priv->lock);

	/* Clear mailbox registers. */
	writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
	writel_relaxed(0, regs + REG_TO_HOST_MBOX);

	/* Disable DCPU clock gating */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	/* Take DCPU out of reset */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	mutex_unlock(&priv->lock);
}

static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
{
	unsigned int sum = 0;
	unsigned int i;

	/* Don't include the last field in the checksum. */
	for (i = 0; i < max; i++)
		sum += msg[i];

	return sum;
}

static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
				 char *buf, ssize_t *size)
{
	unsigned int msg_type;
	unsigned int offset;
	void __iomem *ptr = NULL;

	/* There is no need to use this function for API v3 or later. */
	if (unlikely(priv->dpfe_api->version >= 3))
		return NULL;

	msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
	offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;

	/*
	 * msg_type == 1: the offset is relative to the message RAM
	 * msg_type == 0: the offset is relative to the data RAM (this is the
	 *                previous way of passing data)
	 * msg_type is anything else: there's a critical hardware problem
	 */
	switch (msg_type) {
	case 1:
		ptr = priv->regs + DCPU_MSG_RAM_START + offset;
		break;
	case 0:
		ptr = priv->dmem + offset;
		break;
	default:
		dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
			  response);
		if (buf && size)
			*size = sprintf(buf,
				"FATAL: communication error with DCPU\n");
	}

	return ptr;
}

static void __finalize_command(struct brcmstb_dpfe_priv *priv)
{
	unsigned int release_mbox;

	/*
	 * Which MBOX register we have to write to signal we are done depends
	 * on the API version.
	 */
	release_mbox = (priv->dpfe_api->version < 2)
			? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
	writel_relaxed(0, priv->regs + release_mbox);
}
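
/*
 * Send a command to the DCPU and collect its reply: wait for the TO_HOST
 * mailbox to clear, write the message (arguments plus checksum) into the
 * message RAM, ring the TO_DCPU doorbell, then poll for the response code
 * and copy the reply words back into 'result'.
 */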
static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
			  u32 result[])
{
	const u32 *msg = priv->dpfe_api->command[cmd];
	void __iomem *regs = priv->regs;
	unsigned int i, chksum, chksum_idx;
	int ret = 0;
	u32 resp;

	if (cmd >= DPFE_CMD_MAX)
		return -1;

	mutex_lock(&priv->lock);

	/* Wait for DCPU to become ready */
	for (i = 0; i < DELAY_LOOP_MAX; i++) {
		resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
		if (resp == 0)
			break;
		msleep(1);
	}
	if (resp != 0) {
		mutex_unlock(&priv->lock);
		return -ffs(DCPU_RET_ERR_TIMEDOUT);
	}

	/* Compute checksum over the message */
	chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
	chksum = get_msg_chksum(msg, chksum_idx);

	/* Write command and arguments to message area */
	for (i = 0; i < MSG_FIELD_MAX; i++) {
		if (i == chksum_idx)
			writel_relaxed(chksum, regs + DCPU_MSG_RAM(i));
		else
			writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
	}

	/* Tell DCPU there is a command waiting */
	writel_relaxed(1, regs + REG_TO_DCPU_MBOX);

	/* Wait for DCPU to process the command */
	for (i = 0; i < DELAY_LOOP_MAX; i++) {
		/* Read response code */
		resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
		if (resp > 0)
			break;
		msleep(1);
	}

	if (i == DELAY_LOOP_MAX) {
		resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
		ret = -ffs(resp);
	} else {
		/* Read response data */
		for (i = 0; i < MSG_FIELD_MAX; i++)
			result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
		chksum_idx = result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
	}

	/* Tell DCPU we are done */
	__finalize_command(priv);

	mutex_unlock(&priv->lock);

	if (ret)
		return ret;

	/* Verify response */
	chksum = get_msg_chksum(result, chksum_idx);
	if (chksum != result[chksum_idx])
		resp = DCPU_RET_ERR_CHKSUM;

	if (resp != DCPU_RET_SUCCESS) {
		resp &= ~DCPU_RET_ERROR_BIT;
		ret = -ffs(resp);
	}

	return ret;
}

/* Ensure that the firmware file loaded meets all the requirements. */
static int __verify_firmware(struct init_data *init,
			     const struct firmware *fw)
{
	const struct dpfe_firmware_header *header = (void *)fw->data;
	unsigned int dmem_size, imem_size, total_size;
	bool is_big_endian = false;
	const u32 *chksum_ptr;

	if (header->magic == DPFE_BE_MAGIC)
		is_big_endian = true;
	else if (header->magic != DPFE_LE_MAGIC)
		return ERR_INVALID_MAGIC;

	if (is_big_endian) {
		dmem_size = be32_to_cpu(header->dmem_size);
		imem_size = be32_to_cpu(header->imem_size);
	} else {
		dmem_size = le32_to_cpu(header->dmem_size);
		imem_size = le32_to_cpu(header->imem_size);
	}

	/* Data and instruction sections are 32 bit words. */
	if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
		return ERR_INVALID_SIZE;

	/*
	 * The header + the data section + the instruction section + the
	 * checksum must be equal to the total firmware size.
	 */
	total_size = dmem_size + imem_size + sizeof(*header) +
		sizeof(*chksum_ptr);
	if (total_size != fw->size)
		return ERR_INVALID_SIZE;

	/* The checksum comes at the very end. */
	chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;

	init->is_big_endian = is_big_endian;
	init->dmem_len = dmem_size;
	init->imem_len = imem_size;
	init->chksum = (is_big_endian)
		? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);

	return 0;
}

/* Verify checksum by reading back the firmware from co-processor RAM. */
static int __verify_fw_checksum(struct init_data *init,
				struct brcmstb_dpfe_priv *priv,
				const struct dpfe_firmware_header *header,
				u32 checksum)
{
	u32 magic, sequence, version, sum;
	u32 __iomem *dmem = priv->dmem;
	u32 __iomem *imem = priv->imem;
	unsigned int i;

	if (init->is_big_endian) {
		magic = be32_to_cpu(header->magic);
		sequence = be32_to_cpu(header->sequence);
		version = be32_to_cpu(header->version);
	} else {
		magic = le32_to_cpu(header->magic);
		sequence = le32_to_cpu(header->sequence);
		version = le32_to_cpu(header->version);
	}

	sum = magic + sequence + version + init->dmem_len + init->imem_len;

	for (i = 0; i < init->dmem_len / sizeof(u32); i++)
		sum += readl_relaxed(dmem + i);

	for (i = 0; i < init->imem_len / sizeof(u32); i++)
		sum += readl_relaxed(imem + i);

	return (sum == checksum) ? 0 : -1;
}
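
/*
 * Copy one firmware section into co-processor RAM. The target area is
 * cleared first; each 32-bit word is then converted from the image's
 * endianness to CPU order before being written out.
 */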
static int __write_firmware(u32 __iomem *mem, const u32 *fw,
			    unsigned int size, bool is_big_endian)
{
	unsigned int i;

	/* Convert size to 32-bit words. */
	size /= sizeof(u32);

	/* It is recommended to clear the firmware area first. */
	for (i = 0; i < size; i++)
		writel_relaxed(0, mem + i);

	/* Now copy it. */
	if (is_big_endian) {
		for (i = 0; i < size; i++)
			writel_relaxed(be32_to_cpu(fw[i]), mem + i);
	} else {
		for (i = 0; i < size; i++)
			writel_relaxed(le32_to_cpu(fw[i]), mem + i);
	}

	return 0;
}
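
/*
 * Download the DCPU firmware: skip the download if the DCPU is already up
 * and answering commands, otherwise load the image from the file system,
 * write DMEM and IMEM, verify the checksum against co-processor RAM and
 * finally release the DCPU from reset.
 */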
static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
{
	const struct dpfe_firmware_header *header;
	unsigned int dmem_size, imem_size;
	struct device *dev = priv->dev;
	bool is_big_endian = false;
	const struct firmware *fw;
	const u32 *dmem, *imem;
	struct init_data init;
	const void *fw_blob;
	int ret;

	/*
	 * Skip downloading the firmware if the DCPU is already running and
	 * responding to commands.
	 */
	if (is_dcpu_enabled(priv)) {
		u32 response[MSG_FIELD_MAX];

		ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
		if (!ret)
			return 0;
	}

	/*
	 * If the firmware filename is NULL it means the boot firmware has to
	 * download the DCPU firmware for us. If that didn't work, we have to
	 * bail, since downloading it ourselves wouldn't work either.
	 */
	if (!priv->dpfe_api->fw_name)
		return -ENODEV;

	ret = firmware_request_nowarn(&fw, priv->dpfe_api->fw_name, dev);
	/*
	 * Defer the firmware download if the firmware file couldn't be found.
	 * The root file system may not be available yet.
	 */
	if (ret)
		return (ret == -ENOENT) ? -EPROBE_DEFER : ret;

	ret = __verify_firmware(&init, fw);
	if (ret) {
		ret = -EFAULT;
		goto release_fw;
	}

	__disable_dcpu(priv);

	is_big_endian = init.is_big_endian;
	dmem_size = init.dmem_len;
	imem_size = init.imem_len;

	/* At the beginning of the firmware blob is a header. */
	header = (struct dpfe_firmware_header *)fw->data;
	/* Void pointer to the beginning of the actual firmware. */
	fw_blob = fw->data + sizeof(*header);
	/* IMEM comes right after the header. */
	imem = fw_blob;
	/* DMEM follows after IMEM. */
	dmem = fw_blob + imem_size;

	ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
	if (ret)
		goto release_fw;
	ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
	if (ret)
		goto release_fw;

	ret = __verify_fw_checksum(&init, priv, header, init.chksum);
	if (ret)
		goto release_fw;

	__enable_dcpu(priv);

release_fw:
	release_firmware(fw);
	return ret;
}
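
/*
 * Helper shared by the sysfs show functions: send 'command' to the DCPU and,
 * on failure, report the error directly in the output buffer.
 */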
static ssize_t generic_show(unsigned int command, u32 response[],
			    struct brcmstb_dpfe_priv *priv, char *buf)
{
	int ret;

	if (!priv)
		return sprintf(buf, "ERROR: driver private data not set\n");

	ret = __send_command(priv, command, response);
	if (ret < 0)
		return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));

	return 0;
}

static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	unsigned int info;
	ssize_t ret;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_INFO, response, priv, buf);
	if (ret)
		return ret;

	info = response[MSG_ARG0];

	return sprintf(buf, "%u.%u.%u.%u\n",
		       (info >> 24) & 0xff,
		       (info >> 16) & 0xff,
		       (info >> 8) & 0xff,
		       info & 0xff);
}
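
/*
 * Report the refresh interval, the decoded MR4 fields (refresh rate, self
 * refresh abort, post-package repair, thermal offset, temperature update
 * flag) and the error word from the DRAM info area.
 */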
static ssize_t show_refresh(struct device *dev,
			    struct device_attribute *devattr, char *buf)
{
	u32 response[MSG_FIELD_MAX];
	void __iomem *info;
	struct brcmstb_dpfe_priv *priv;
	u8 refresh, sr_abort, ppre, thermal_offs, tuf;
	u32 mr4;
	ssize_t ret;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	mr4 = (readl_relaxed(info + DRAM_INFO_MR4) >> DRAM_INFO_MR4_SHIFT) &
	       DRAM_INFO_MR4_MASK;

	refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
	sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
	ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
	thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
	tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
		       readl_relaxed(info + DRAM_INFO_INTERVAL),
		       refresh, sr_abort, ppre, thermal_offs, tuf,
		       readl_relaxed(info + DRAM_INFO_ERROR));
}
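
/*
 * Store a new refresh interval: parse the user-supplied value and write it
 * into the interval field of the DRAM info area.
 */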
static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	void __iomem *info;
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	priv = dev_get_drvdata(dev);
	ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
	if (!info)
		return -EIO;

	writel_relaxed(val, info + DRAM_INFO_INTERVAL);

	return count;
}

static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
			   char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	void __iomem *info;
	ssize_t ret;
	u32 mr5, mr6, mr7, mr8, err;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_VENDOR, response, priv, buf);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	mr5 = (readl_relaxed(info + DRAM_VENDOR_MR5) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr6 = (readl_relaxed(info + DRAM_VENDOR_MR6) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr7 = (readl_relaxed(info + DRAM_VENDOR_MR7) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr8 = (readl_relaxed(info + DRAM_VENDOR_MR8) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	err = readl_relaxed(info + DRAM_VENDOR_ERROR) & DRAM_VENDOR_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x\n", mr5, mr6, mr7, mr8, err);
}
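
/*
 * API v3 only: the MR4 through MR8 values and the error word are returned
 * directly in the response message, so no message RAM lookup is needed.
 */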
static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	ssize_t ret;
	u32 mr4, mr5, mr6, mr7, mr8, err;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
	if (ret)
		return ret;

	mr4 = response[MSG_ARG0 + 0] & DRAM_INFO_MR4_MASK;
	mr5 = response[MSG_ARG0 + 1] & DRAM_DDR_INFO_MASK;
	mr6 = response[MSG_ARG0 + 2] & DRAM_DDR_INFO_MASK;
	mr7 = response[MSG_ARG0 + 3] & DRAM_DDR_INFO_MASK;
	mr8 = response[MSG_ARG0 + 4] & DRAM_DDR_INFO_MASK;
	err = response[MSG_ARG0 + 5] & DRAM_DDR_INFO_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x\n", mr4, mr5, mr6, mr7,
			mr8, err);
}
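
/*
 * On resume the DCPU may have lost its firmware, so try to download it
 * again; the download is skipped if the DCPU is still up and responding.
 */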
static int brcmstb_dpfe_resume(struct platform_device *pdev)
{
	struct brcmstb_dpfe_priv *priv = platform_get_drvdata(pdev);

	return brcmstb_dpfe_download_firmware(priv);
}

static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct brcmstb_dpfe_priv *priv;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	mutex_init(&priv->lock);
	platform_set_drvdata(pdev, priv);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu");
	priv->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->regs)) {
		dev_err(dev, "couldn't map DCPU registers\n");
		return -ENODEV;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem");
	priv->dmem = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dmem)) {
		dev_err(dev, "Couldn't map DCPU data memory\n");
		return -ENOENT;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem");
	priv->imem = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->imem)) {
		dev_err(dev, "Couldn't map DCPU instruction memory\n");
		return -ENOENT;
	}

	priv->dpfe_api = of_device_get_match_data(dev);
	if (unlikely(!priv->dpfe_api)) {
		/*
		 * It should be impossible to end up here, but to be safe we
		 * check anyway.
		 */
		dev_err(dev, "Couldn't determine API\n");
		return -ENOENT;
	}

	ret = brcmstb_dpfe_download_firmware(priv);
	if (ret)
		return dev_err_probe(dev, ret, "Couldn't download firmware\n");

	ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
	if (!ret)
		dev_info(dev, "registered with API v%d.\n",
			 priv->dpfe_api->version);

	return ret;
}

static int brcmstb_dpfe_remove(struct platform_device *pdev)
{
	struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);

	sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);

	return 0;
}

static const struct of_device_id brcmstb_dpfe_of_match[] = {
	/* Use legacy API v2 for a select number of chips */
	{ .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_new_v2 },
	/* API v3 is the default going forward */
	{ .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
	{}
};
MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);

static struct platform_driver brcmstb_dpfe_driver = {
	.driver = {
		.name = DRVNAME,
		.of_match_table = brcmstb_dpfe_of_match,
	},
	.probe = brcmstb_dpfe_probe,
	.remove	= brcmstb_dpfe_remove,
	.resume = brcmstb_dpfe_resume,
};
module_platform_driver(brcmstb_dpfe_driver);

MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
MODULE_LICENSE("GPL");