// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

#include "cxl.h"
#include "hcalls.h"

#define DOWNLOAD_IMAGE 1
#define VALIDATE_IMAGE 2

struct ai_header {
	u16 version;
	u8  reserved0[6];
	u16 vendor;
	u16 device;
	u16 subsystem_vendor;
	u16 subsystem;
	u64 image_offset;
	u64 image_length;
	u8  reserved1[96];
};

static struct semaphore sem;
static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
static struct sg_list *le;
static u64 continue_token;
static unsigned int transfer;

struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

struct update_nodes_workarea {
	__be32 state;
	__be64 unit_address;
	__be32 reserved;
} __packed;

#define DEVICE_SCOPE		3
#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff
#define OPCODE_DELETE		0x01000000
#define OPCODE_UPDATE		0x02000000
#define OPCODE_ADD		0x03000000
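
/*
 * Wrapper around rtas_call(): the contents of @buf are staged in the global
 * rtas_data_buf under rtas_data_buf_lock, the RTAS service identified by
 * @token is invoked with the given @scope, and the (possibly updated) work
 * area is copied back into @buf before the lock is released.
 */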
static int rcall(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}
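
/*
 * Build a new struct property for @dn from the (name, length, value) triple
 * returned by ibm,update-properties and hand it to cxl_update_properties().
 * On failure the partially built property is freed and the error returned.
 */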
static int update_property(struct device_node *dn, const char *name,
			   u32 vd, char *value)
{
	struct property *new_prop;
	u32 *val;
	int rc;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return -ENOMEM;

	new_prop->name = kstrdup(name, GFP_KERNEL);
	if (!new_prop->name) {
		kfree(new_prop);
		return -ENOMEM;
	}

	new_prop->length = vd;
	new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
	if (!new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop);
		return -ENOMEM;
	}
	memcpy(new_prop->value, value, vd);

	val = (u32 *)new_prop->value;
	rc = cxl_update_properties(dn, new_prop);
	pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
		 dn, name, vd, be32_to_cpu(*val));

	if (rc) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
	}
	return rc;
}
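
/*
 * Refresh the properties of the device node identified by @phandle by
 * calling the ibm,update-properties RTAS service repeatedly (rc == 1 means
 * more data is pending) and applying each returned property descriptor
 * with update_property().
 */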
static int update_node(__be32 phandle, s32 scope)
{
	struct update_props_workarea *upwa;
	struct device_node *dn;
	int i, rc, ret;
	char *prop_data;
	char *buf;
	int token;
	u32 nprops;
	u32 vd;

	token = rtas_token("ibm,update-properties");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
	if (!dn) {
		kfree(buf);
		return -ENOENT;
	}

	upwa = (struct update_props_workarea *)&buf[0];
	upwa->phandle = phandle;
	do {
		rc = rcall(token, buf, scope);
		if (rc < 0)
			break;

		prop_data = buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			if ((vd != 0x00000000) && (vd != 0x80000000)) {
				ret = update_property(dn, prop_name, vd,
						      prop_data);
				if (ret)
					pr_err("cxl: Could not update property %s - %i\n",
					       prop_name, ret);

				prop_data += vd;
			}
		}
	} while (rc == 1);

	of_node_put(dn);
	kfree(buf);
	return rc;
}
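
/*
 * Walk the node list returned by the ibm,update-nodes RTAS service for this
 * adapter and act on each entry: updated nodes are refreshed through
 * update_node(), while deleted and added nodes only require the work-area
 * pointer to be advanced here.
 */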
static int update_devicetree(struct cxl *adapter, s32 scope)
{
	struct update_nodes_workarea *unwa;
	u32 action, node_count;
	int token, rc, i;
	__be32 *data, phandle;
	char *buf;

	token = rtas_token("ibm,update-nodes");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	unwa = (struct update_nodes_workarea *)&buf[0];
	unwa->unit_address = cpu_to_be64(adapter->guest->handle);
	do {
		rc = rcall(token, buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
			pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
				 action, node_count);
			data++;

			for (i = 0; i < node_count; i++) {
				phandle = *data++;

				switch (action) {
				case OPCODE_DELETE:
					/* nothing to do */
					break;
				case OPCODE_UPDATE:
					update_node(phandle, scope);
					break;
				case OPCODE_ADD:
					/* nothing to do, just move pointer */
					data++;
					break;
				}
			}
		}
	} while (rc == 1);

	kfree(buf);
	return 0;
}
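
/*
 * Chunk the user-supplied adapter image into the pre-allocated buffers,
 * optionally prepending a struct ai_header to the first chunk, build the
 * scatter/gather list describing the chunks, and pass it to @fct (the
 * download or validate hcall). continue_token carries state across
 * successive calls until the operation completes.
 */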
static int handle_image(struct cxl *adapter, int operation,
			long (*fct)(u64, u64, u64, u64 *),
			struct cxl_adapter_image *ai)
{
	size_t mod, s_copy, len_chunk = 0;
	struct ai_header *header = NULL;
	unsigned int entries = 0, i;
	void *dest, *from;
	int rc = 0, need_header;

	/* base adapter image header */
	need_header = (ai->flags & CXL_AI_NEED_HEADER);
	if (need_header) {
		header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
		if (!header)
			return -ENOMEM;
		header->version = cpu_to_be16(1);
		header->vendor = cpu_to_be16(adapter->guest->vendor);
		header->device = cpu_to_be16(adapter->guest->device);
		header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
		header->subsystem = cpu_to_be16(adapter->guest->subsystem);
		header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
		header->image_length = cpu_to_be64(ai->len_image);
	}

	/* number of entries in the list */
	len_chunk = ai->len_data;
	if (need_header)
		len_chunk += CXL_AI_HEADER_SIZE;

	entries = len_chunk / CXL_AI_BUFFER_SIZE;
	mod = len_chunk % CXL_AI_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > CXL_AI_MAX_ENTRIES) {
		rc = -EINVAL;
		goto err;
	}

	/*          < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
	 * chunk 0  ----------------------------------------------------
	 *          | header   |   data                                |
	 *          ----------------------------------------------------
	 * chunk 1  ----------------------------------------------------
	 *          | data                                             |
	 *          ----------------------------------------------------
	 * ....
	 * chunk n  ----------------------------------------------------
	 *          | data                                             |
	 *          ----------------------------------------------------
	 */
	from = (void *) ai->data;
	for (i = 0; i < entries; i++) {
		dest = buffer[i];
		s_copy = CXL_AI_BUFFER_SIZE;

		if ((need_header) && (i == 0)) {
			/* add adapter image header */
			memcpy(buffer[i], header, sizeof(struct ai_header));
			s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
			dest += CXL_AI_HEADER_SIZE; /* image offset */
		}

		if ((i == (entries - 1)) && mod)
			s_copy = mod;

		/* copy data; report a failed user copy instead of silently succeeding */
		if (copy_from_user(dest, from, s_copy)) {
			rc = -EFAULT;
			goto err;
		}

		/* fill in the list */
		le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
		le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
		from += s_copy;
	}
	pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
		 __func__, operation, need_header, entries, continue_token);

	/*
	 * download/validate the adapter image to the coherent
	 * platform facility
	 */
	rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
		 &continue_token);
	if (rc == 0) /* success of download/validation operation */
		continue_token = 0;

err:
	kfree(header);

	return rc;
}
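
/*
 * Dispatch a DOWNLOAD_IMAGE or VALIDATE_IMAGE request to handle_image().
 * A failed operation resets the adapter; a successful validation removes
 * the current AFUs, resets the adapter, refreshes the device tree and
 * flags that a module reload is needed on release.
 */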
static int transfer_image(struct cxl *adapter, int operation,
			  struct cxl_adapter_image *ai)
{
	int rc = 0;
	int afu;

	switch (operation) {
	case DOWNLOAD_IMAGE:
		rc = handle_image(adapter, operation,
				  &cxl_h_download_adapter_image, ai);
		if (rc < 0) {
			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);
		}
		return rc;

	case VALIDATE_IMAGE:
		rc = handle_image(adapter, operation,
				  &cxl_h_validate_adapter_image, ai);
		if (rc < 0) {
			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);
			return rc;
		}
		if (rc == 0) {
			pr_devel("remove current afu\n");
			for (afu = 0; afu < adapter->slices; afu++)
				cxl_guest_remove_afu(adapter->afu[afu]);

			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);

			/* The entire image has now been
			 * downloaded and the validation has
			 * been successfully performed.
			 * After that, the partition should call
			 * ibm,update-nodes and
			 * ibm,update-properties to receive the
			 * current configuration
			 */
			rc = update_devicetree(adapter, DEVICE_SCOPE);
			transfer = 1;
		}
		return rc;
	}

	return -EINVAL;
}
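
/*
 * Copy the struct cxl_adapter_image descriptor from user space, reject
 * requests with non-zero reserved fields or unknown flag bits, and forward
 * it to transfer_image(). A user-space caller would look roughly like the
 * sketch below (assuming the uapi definitions from misc/cxl.h; field names
 * other than flags, data, len_data and len_image are not used here):
 *
 *	struct cxl_adapter_image ai = {
 *		.data     = (__u64)(uintptr_t)image_buf,
 *		.len_data = image_size,
 *	};
 *	ioctl(fd, CXL_IOCTL_DOWNLOAD_IMAGE, &ai);
 */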
static long ioctl_transfer_image(struct cxl *adapter, int operation,
				 struct cxl_adapter_image __user *uai)
{
	struct cxl_adapter_image ai;

	pr_devel("%s\n", __func__);

	if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
		return -EFAULT;

	/*
	 * Make sure reserved fields and bits are set to 0
	 */
	if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
	    (ai.flags & ~CXL_AI_ALL))
		return -EINVAL;

	return transfer_image(adapter, operation, &ai);
}
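
/*
 * Open the flash character device. The semaphore restricts the device to a
 * single opener; the scatter/gather list page and the CXL_AI_MAX_ENTRIES
 * data pages used by handle_image() are allocated here.
 */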
static int device_open(struct inode *inode, struct file *file)
{
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	struct cxl *adapter;
	int rc = 0, i;

	pr_devel("in %s\n", __func__);

	BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);

	/* Allows one process to open the device by using a semaphore */
	if (down_interruptible(&sem) != 0)
		return -EPERM;

	if (!(adapter = get_cxl_adapter(adapter_num))) {
		rc = -ENODEV;
		goto err_unlock;
	}

	file->private_data = adapter;
	continue_token = 0;
	transfer = 0;

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
		buffer[i] = NULL;

	/* aligned buffer containing list entries which describes up to
	 * 1 megabyte of data (256 entries of 4096 bytes each)
	 * Logical real address of buffer 0  -  Buffer 0 length in bytes
	 * Logical real address of buffer 1  -  Buffer 1 length in bytes
	 * Logical real address of buffer 2  -  Buffer 2 length in bytes
	 * ....
	 * ....
	 * Logical real address of buffer N  -  Buffer N length in bytes
	 */
	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!buffer[i]) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);

err:
	put_device(&adapter->dev);

err_unlock:
	up(&sem);

	return rc;
}
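
/*
 * ioctl entry point: CXL_IOCTL_DOWNLOAD_IMAGE and CXL_IOCTL_VALIDATE_IMAGE
 * are both funnelled through ioctl_transfer_image(); anything else is
 * rejected with -EINVAL.
 */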
static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl *adapter = file->private_data;

	pr_devel("in %s\n", __func__);

	if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
		return ioctl_transfer_image(adapter,
					DOWNLOAD_IMAGE,
					(struct cxl_adapter_image __user *)arg);
	else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
		return ioctl_transfer_image(adapter,
					VALIDATE_IMAGE,
					(struct cxl_adapter_image __user *)arg);
	else
		return -EINVAL;
}
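
/*
 * Release the flash character device: free the per-open pages, drop the
 * semaphore and the adapter reference, then either reload the module (after
 * a successful transfer) or reset the adapter.
 */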
static int device_close(struct inode *inode, struct file *file)
{
	struct cxl *adapter = file->private_data;
	int i;

	pr_devel("in %s\n", __func__);

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);

	up(&sem);
	put_device(&adapter->dev);
	continue_token = 0;

	/* reload the module */
	if (transfer)
		cxl_guest_reload_module(adapter);
	else {
		pr_devel("resetting adapter\n");
		cxl_h_reset_adapter(adapter->guest->handle);
	}

	transfer = 0;
	return 0;
}

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.open		= device_open,
	.unlocked_ioctl	= device_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.release	= device_close,
};
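
/* Tear down the flash character device for this adapter. */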
void cxl_guest_remove_chardev(struct cxl *adapter)
{
	cdev_del(&adapter->guest->cdev);
}
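
/*
 * Register the flash character device for this adapter and initialise the
 * single-opener semaphore used by device_open().
 */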
int cxl_guest_add_chardev(struct cxl *adapter)
{
	dev_t devt;
	int rc;

	devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
	cdev_init(&adapter->guest->cdev, &fops);
	if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
		dev_err(&adapter->dev,
			"Unable to add chardev on adapter (card%i): %i\n",
			adapter->adapter_num, rc);
		goto err;
	}
	adapter->dev.devt = devt;
	sema_init(&sem, 1);
err:
	return rc;
}