ti_k3_r5f_rproc.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments' K3 R5 Remoteproc driver
 *
 * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <remoteproc.h>
#include <errno.h>
#include <clk.h>
#include <reset.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include "ti_sci_proc.h"
/*
 * R5F's view of this address can either be for ATCM or BTCM with the other
 * at address 0x0 based on loczrama signal.
 */
#define K3_R5_TCM_DEV_ADDR	0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN			0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN			0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP			0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT			0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN			0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE		0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN			0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN			0x00002000
#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR		0x10000000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT		0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE			0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI			0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED		0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED	0x00000100

#define NR_CORES	2
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
};
/**
 * struct k3_r5f_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5f_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
/**
 * struct k3_r5f_core - K3 R5 core structure
 * @dev: cached device pointer
 * @cluster: pointer to the parent cluster.
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @mem: Array of available internal memories
 * @num_mems: Number of available memories
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 * @in_use: flag to tell if the core is already in use.
 */
struct k3_r5f_core {
	struct udevice *dev;
	struct k3_r5f_cluster *cluster;
	struct reset_ctl reset;
	struct ti_sci_proc tsp;
	struct k3_r5f_mem *mem;
	int num_mems;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
	bool in_use;
};
/**
 * struct k3_r5f_cluster - K3 R5F Cluster structure
 * @mode: Mode to configure the Cluster - Split or LockStep
 * @cores: Array of pointers to R5 cores within the cluster
 */
struct k3_r5f_cluster {
	enum cluster_mode mode;
	struct k3_r5f_core *cores[NR_CORES];
};

static bool is_primary_core(struct k3_r5f_core *core)
{
	return core == core->cluster->cores[0];
}
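
/*
 * Request TI-SCI ownership of the processor(s) before touching boot
 * configuration. In lockstep mode both cores of the cluster must be
 * acquired; in split mode only the core being operated on is requested.
 * On failure, any cores already acquired are released again.
 */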
static int k3_r5f_proc_request(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	int i, ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		for (i = 0; i < NR_CORES; i++) {
			ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
			if (ret)
				goto proc_release;
		}
	} else {
		ret = ti_sci_proc_request(&core->tsp);
		if (ret)
			return ret;
	}

	return 0;

proc_release:
	while (i >= 0) {
		ti_sci_proc_release(&cluster->cores[i]->tsp);
		i--;
	}

	return ret;
}
static void k3_r5f_proc_release(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	int i;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
		for (i = 0; i < NR_CORES; i++)
			ti_sci_proc_release(&cluster->cores[i]->tsp);
	else
		ti_sci_proc_release(&core->tsp);
}
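
/*
 * Releasing a lockstep cluster powers on the PSC module of each core first
 * and only then deasserts the local resets, walking the cores in descending
 * order so that core 0 comes out of reset last. Failures are unrolled in the
 * opposite direction.
 */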
static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
{
	int ret, c;

	dev_dbg(cluster->cores[0]->dev, "%s\n", __func__);

	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
		if (ret)
			goto unroll_module_reset;
	}

	/* deassert local reset on all applicable cores */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = reset_deassert(&cluster->cores[c]->reset);
		if (ret)
			goto unroll_local_reset;
	}

	return 0;

unroll_local_reset:
	while (c < NR_CORES) {
		reset_assert(&cluster->cores[c]->reset);
		c++;
	}
	c = 0;
unroll_module_reset:
	while (c < NR_CORES) {
		ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
		c++;
	}

	return ret;
}
static int k3_r5f_split_release(struct k3_r5f_core *core)
{
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	ret = ti_sci_proc_power_domain_on(&core->tsp);
	if (ret) {
		dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = reset_deassert(&core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
			ret);
		if (ti_sci_proc_power_domain_off(&core->tsp))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}

	return ret;
}
static int k3_r5f_prepare(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
		ret = k3_r5f_lockstep_release(cluster);
	else
		ret = k3_r5f_split_release(core);

	if (ret)
		dev_err(dev, "Unable to enable cores for TCM loading %d\n",
			ret);

	return ret;
}
static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;

	if (core->in_use) {
		dev_err(core->dev,
			"Invalid op: Trying to load/start on already running core %d\n",
			core->tsp.proc_id);
		return -EINVAL;
	}

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
		printf("Secondary core is not probed in this cluster\n");
		return -EAGAIN;
	}

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
		dev_err(core->dev,
			"Invalid op: Trying to start secondary core %d in lockstep mode\n",
			core->tsp.proc_id);
		return -EINVAL;
	}

	if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
		if (!core->cluster->cores[0]->in_use) {
			dev_err(core->dev,
				"Invalid seq: Enable primary core before loading secondary core\n");
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * k3_r5f_load() - Load up the Remote processor image
 * @dev: rproc device pointer
 * @addr: Address at which image is available
 * @size: size of the image
 *
 * Return: 0 if all goes good, else appropriate error code.
 */
static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	u32 boot_vector;
	int ret;

	dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	ret = k3_r5f_prepare(dev);
	if (ret) {
		dev_err(dev, "R5f prepare failed for core %d\n",
			core->tsp.proc_id);
		goto proc_release;
	}

	/* Zero out TCMs so that ECC can be effective on all TCM addresses */
	if (core->atcm_enable)
		memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
	if (core->btcm_enable)
		memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

	ret = rproc_elf_load_image(dev, addr, size);
	if (ret < 0) {
		dev_err(dev, "Loading elf failed %d\n", ret);
		goto proc_release;
	}

	boot_vector = rproc_elf_get_boot_addr(dev, addr);

	dev_dbg(dev, "%s: Boot vector = 0x%x\n", __func__, boot_vector);

	ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);

proc_release:
	k3_r5f_proc_release(core);

	return ret;
}
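
/*
 * Core halt/run are implemented purely through the TI-SCI processor control
 * interface: setting the R5 CORE_HALT control flag holds the core, clearing
 * it lets the core execute from its configured boot vector.
 */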
static int k3_r5f_core_halt(struct k3_r5f_core *core)
{
	int ret;

	ret = ti_sci_proc_set_control(&core->tsp,
				      PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
	if (ret)
		dev_err(core->dev, "Core %d failed to stop\n",
			core->tsp.proc_id);

	return ret;
}

static int k3_r5f_core_run(struct k3_r5f_core *core)
{
	int ret;

	ret = ti_sci_proc_set_control(&core->tsp,
				      0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
	if (ret) {
		dev_err(core->dev, "Core %d failed to start\n",
			core->tsp.proc_id);
		return ret;
	}

	return 0;
}
/**
 * k3_r5f_start() - Start the remote processor
 * @dev: rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_r5f_start(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret, c;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			for (c = NR_CORES - 1; c >= 0; c--) {
				ret = k3_r5f_core_run(cluster->cores[c]);
				if (ret)
					goto unroll_core_run;
			}
		} else {
			dev_err(dev,
				"Invalid op: Trying to start secondary core %d in lockstep mode\n",
				core->tsp.proc_id);
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		ret = k3_r5f_core_run(core);
		if (ret)
			goto proc_release;
	}

	core->in_use = true;

	k3_r5f_proc_release(core);

	return 0;

unroll_core_run:
	while (c < NR_CORES) {
		k3_r5f_core_halt(cluster->cores[c]);
		c++;
	}
proc_release:
	k3_r5f_proc_release(core);

	return ret;
}
static int k3_r5f_split_reset(struct k3_r5f_core *core)
{
	int ret = 0;

	dev_dbg(core->dev, "%s\n", __func__);

	if (reset_assert(&core->reset))
		ret = -EINVAL;

	if (ti_sci_proc_power_domain_off(&core->tsp))
		ret = -EINVAL;

	return ret;
}
static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
{
	int ret = 0, c;

	dev_dbg(cluster->cores[0]->dev, "%s\n", __func__);

	for (c = 0; c < NR_CORES; c++)
		if (reset_assert(&cluster->cores[c]->reset))
			ret = -EINVAL;

	/* disable PSC modules on all applicable cores */
	for (c = 0; c < NR_CORES; c++)
		if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
			ret = -EINVAL;

	return ret;
}
static int k3_r5f_unprepare(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core))
			ret = k3_r5f_lockstep_reset(cluster);
	} else {
		ret = k3_r5f_split_reset(core);
	}

	if (ret)
		dev_warn(dev, "Unable to disable cores %d\n", ret);

	return 0;
}
static int k3_r5f_stop(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int c, ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	core->in_use = false;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			for (c = 0; c < NR_CORES; c++)
				k3_r5f_core_halt(cluster->cores[c]);
		} else {
			dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		k3_r5f_core_halt(core);
	}

	ret = k3_r5f_unprepare(dev);

proc_release:
	k3_r5f_proc_release(core);

	return ret;
}
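
/*
 * Translate a remote-processor device address into a host virtual address.
 * TCM locations can be referenced either by their SoC bus address or by the
 * R5-view device address, so both ranges are checked; anything that does not
 * fall inside an internal memory is assumed to be DDR and is mapped directly.
 */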
static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	ulong mem_size;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	if (size <= 0)
		return NULL;

	for (i = 0; i < core->num_mems; i++) {
		bus_addr = core->mem[i].bus_addr;
		dev_addr = core->mem[i].dev_addr;
		mem_size = core->mem[i].size;

		if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
			offset = da - bus_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
			offset = da - dev_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* Assume it is DDR region and return da */
	return map_physmem(da, size, MAP_NOCACHE);
}
static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}

static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}

static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};
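
/*
 * One-time boot configuration done at probe time: the core is halted and the
 * TCM enables, the TCM-at-reset-base (loczrama) selection, ARM (non-Thumb)
 * entry state and the cluster lockstep/split setting are programmed through
 * the TI-SCI processor boot-config flags. The boot vector itself is left
 * untouched here and is programmed later during load.
 */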
static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
	bool lockstep_permitted;
	u64 boot_vec = 0;
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	ret = ti_sci_proc_request(&core->tsp);
	if (ret < 0)
		return ret;

	/* Do not touch boot vector now. Load will take care of it. */
	clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;

	ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
	if (ret)
		goto out;

	/* Sanity check for Lockstep mode */
	lockstep_permitted = !!(sts &
				PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
	if (cluster->mode && is_primary_core(core) && !lockstep_permitted) {
		dev_err(core->dev, "LockStep mode not permitted on this device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Primary core only configuration */
	if (is_primary_core(core)) {
		/* always enable ARM mode */
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
		if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
			set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
		else if (lockstep_permitted)
			clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
	}

	if (core->atcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

	if (core->btcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

	if (core->loczrama)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

	ret = k3_r5f_core_halt(core);
	if (ret)
		goto out;

	ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
out:
	ti_sci_proc_release(&core->tsp);
	return ret;
}
static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
{
	u32 ids[2];
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(tsp->sci)) {
		dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
		return PTR_ERR(tsp->sci);
	}

	ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
	if (ret) {
		dev_err(dev, "Proc IDs not populated %d\n", ret);
		return ret;
	}

	tsp->ops = &tsp->sci->ops.proc_ops;
	tsp->proc_id = ids[0];
	tsp->host_id = ids[1];
	tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
					   TI_SCI_RESOURCE_NULL);
	if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
		dev_err(dev, "Device ID not populated\n");
		return -ENODEV;
	}

	return 0;
}
static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
{
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	core->atcm_enable = dev_read_u32_default(core->dev, "atcm-enable", 0);
	core->btcm_enable = dev_read_u32_default(core->dev, "btcm-enable", 1);
	core->loczrama = dev_read_u32_default(core->dev, "loczrama", 1);

	ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
	if (ret)
		return ret;

	ret = reset_get_by_index(core->dev, 0, &core->reset);
	if (ret) {
		dev_err(core->dev, "Reset lines not available: %d\n", ret);
		return ret;
	}

	return 0;
}
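
/*
 * The ATCM and BTCM regions are described by named reg entries in the device
 * tree. Each bus address is mapped uncached for host access, and the matching
 * R5-view device address (0x0 or K3_R5_TCM_DEV_ADDR) is derived from the
 * loczrama setting.
 */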
static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct udevice *dev = core->dev;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	core->num_mems = ARRAY_SIZE(mem_names);
	core->mem = calloc(core->num_mems, sizeof(*core->mem));
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < core->num_mems; i++) {
		core->mem[i].bus_addr = dev_read_addr_size_name(dev,
					mem_names[i],
					(fdt_addr_t *)&core->mem[i].size);
		if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
			dev_err(dev, "%s bus address not found\n",
				mem_names[i]);
			return -EINVAL;
		}
		core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
						    core->mem[i].size,
						    MAP_NOCACHE);

		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
						0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
						K3_R5_TCM_DEV_ADDR : 0;
		}

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}

	return 0;
}
/**
 * k3_r5f_probe() - Basic probe
 * @dev: corresponding k3 remote processor device
 *
 * Return: 0 if all goes good, else appropriate error code.
 */
static int k3_r5f_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
	struct k3_r5f_core *core = dev_get_priv(dev);
	bool r_state;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	core->dev = dev;
	ret = k3_r5f_of_to_priv(core);
	if (ret)
		return ret;

	core->cluster = cluster;
	/* Assume Primary core gets probed first */
	if (!cluster->cores[0])
		cluster->cores[0] = core;
	else
		cluster->cores[1] = core;

	ret = k3_r5f_core_of_get_memories(core);
	if (ret) {
		dev_err(dev, "Rproc getting internal memories failed\n");
		return ret;
	}

	ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci, core->tsp.dev_id,
					       &r_state, &core->in_use);
	if (ret)
		return ret;

	if (core->in_use) {
		dev_info(dev, "Core %d is already in use. No rproc commands work\n",
			 core->tsp.proc_id);
		return 0;
	}

	/* Make sure Local reset is asserted. Redundant? */
	reset_assert(&core->reset);

	ret = k3_r5f_rproc_configure(core);
	if (ret) {
		dev_err(dev, "rproc configure failed %d\n", ret);
		return ret;
	}

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}
static int k3_r5f_remove(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);

	free(core->mem);

	ti_sci_proc_release(&core->tsp);

	return 0;
}

static const struct udevice_id k3_r5f_rproc_ids[] = {
	{ .compatible = "ti,am654-r5f"},
	{ .compatible = "ti,j721e-r5f"},
	{}
};

U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_core),
};
static int k3_r5f_cluster_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev);

	dev_dbg(dev, "%s\n", __func__);

	cluster->mode = dev_read_u32_default(dev, "lockstep-mode",
					     CLUSTER_MODE_LOCKSTEP);

	if (device_get_child_count(dev) != 2) {
		dev_err(dev, "Invalid number of R5 cores\n");
		return -EINVAL;
	}

	dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
		__func__, cluster->mode ? "lockstep" : "split");

	return 0;
}
static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{}
};

U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_cluster),
	.flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
};