kexec.c

  1. /*
  2. * kexec.c - kexec system call
  3. * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
  4. *
  5. * This source code is licensed under the GNU General Public License,
  6. * Version 2. See the file COPYING for more details.
  7. */
  8. #include <linux/capability.h>
  9. #include <linux/mm.h>
  10. #include <linux/file.h>
  11. #include <linux/slab.h>
  12. #include <linux/fs.h>
  13. #include <linux/kexec.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/list.h>
  16. #include <linux/highmem.h>
  17. #include <linux/syscalls.h>
  18. #include <linux/reboot.h>
  19. #include <linux/syscalls.h>
  20. #include <linux/ioport.h>
  21. #include <linux/hardirq.h>
  22. #include <linux/elf.h>
  23. #include <linux/elfcore.h>
  24. #include <asm/page.h>
  25. #include <asm/uaccess.h>
  26. #include <asm/io.h>
  27. #include <asm/system.h>
  28. #include <asm/semaphore.h>
  29. /* Per cpu memory for storing cpu states in case of system crash. */
  30. note_buf_t* crash_notes;
  31. /* Location of the reserved area for the crash kernel */
  32. struct resource crashk_res = {
  33. .name = "Crash kernel",
  34. .start = 0,
  35. .end = 0,
  36. .flags = IORESOURCE_BUSY | IORESOURCE_MEM
  37. };
  38. int kexec_should_crash(struct task_struct *p)
  39. {
  40. if (in_interrupt() || !p->pid || is_init(p) || panic_on_oops)
  41. return 1;
  42. return 0;
  43. }
  44. /*
  45. * When kexec transitions to the new kernel there is a one-to-one
  46. * mapping between physical and virtual addresses. On processors
  47. * where you can disable the MMU this is trivial, and easy. For
  48. * others it is still a simple predictable page table to setup.
  49. *
  50. * In that environment kexec copies the new kernel to its final
  51. * resting place. This means I can only support memory whose
  52. * physical address can fit in an unsigned long. In particular
  53. * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
  54. * If the assembly stub has more restrictive requirements
  55. * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
  56. * defined more restrictively in <asm/kexec.h>.
  57. *
  58. * The code for the transition from the current kernel to
  59. * the new kernel is placed in the control_code_buffer, whose size
  60. * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single
  61. * page of memory is necessary, but some architectures require more.
  62. * Because this memory must be identity mapped in the transition from
  63. * virtual to physical addresses it must live in the range
  64. * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
  65. * modifiable.
  66. *
  67. * The assembly stub in the control code buffer is passed a linked list
  68. * of descriptor pages detailing the source pages of the new kernel,
  69. * and the destination addresses of those source pages. As this data
  70. * structure is not used in the context of the current OS, it must
  71. * be self-contained.
  72. *
  73. * The code has been made to work with highmem pages and will use a
  74. * destination page in its final resting place (if it happens
  75. * to allocate it). The end product of this is that most of the
  76. * physical address space, and most of RAM can be used.
  77. *
  78. * Future directions include:
  79. * - allocating a page table with the control code buffer identity
  80. * mapped, to simplify machine_kexec and make kexec_on_panic more
  81. * reliable.
  82. */
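As a concrete illustration of the descriptor list described above, the following standalone sketch interprets a kimage-style entry stream in user space. The IND_* values are assumed to mirror include/linux/kexec.h of this era, and real page-sized physical copies are reduced to 16-byte memcpy()s on aligned buffers; it is a toy model of what the relocation stub does, not kernel code.

#include <stdio.h>
#include <string.h>

/* Tag bits in the low bits of each entry (assumed to mirror <linux/kexec.h>). */
#define IND_DESTINATION 0x1
#define IND_INDIRECTION 0x2
#define IND_DONE        0x4
#define IND_SOURCE      0x8

#define TOY_PAGE 16UL			/* toy "page" size for the demo */
#define TOY_MASK (~(TOY_PAGE - 1))	/* strips the tag bits */

/* Walk the list: DESTINATION sets the copy cursor, SOURCE copies one page
 * and advances the cursor, INDIRECTION continues in another list, DONE stops. */
static void run_entries(const unsigned long *entry)
{
	char *dest = NULL;

	for (; !(*entry & IND_DONE); entry++) {
		unsigned long e = *entry;

		if (e & IND_DESTINATION)
			dest = (char *)(e & TOY_MASK);
		else if (e & IND_INDIRECTION)
			entry = (const unsigned long *)(e & TOY_MASK) - 1;
		else if (e & IND_SOURCE) {
			memcpy(dest, (const char *)(e & TOY_MASK), TOY_PAGE);
			dest += TOY_PAGE;
		}
	}
}

int main(void)
{
	static _Alignas(16) char src1[16] = "0123456789abcde";
	static _Alignas(16) char src2[16] = "ABCDEFGHIJKLMNO";
	static _Alignas(16) char dst[32];
	unsigned long list[4];

	list[0] = (unsigned long)dst  | IND_DESTINATION;
	list[1] = (unsigned long)src1 | IND_SOURCE;
	list[2] = (unsigned long)src2 | IND_SOURCE;
	list[3] = IND_DONE;

	run_entries(list);
	printf("%s %s\n", dst, dst + 16);	/* both toy pages landed in dst */
	return 0;
}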
  83. /*
  84. * KIMAGE_NO_DEST is an impossible destination address..., for
  85. * allocating pages whose destination address we do not care about.
  86. */
  87. #define KIMAGE_NO_DEST (-1UL)
  88. static int kimage_is_destination_range(struct kimage *image,
  89. unsigned long start, unsigned long end);
  90. static struct page *kimage_alloc_page(struct kimage *image,
  91. gfp_t gfp_mask,
  92. unsigned long dest);
  93. static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
  94. unsigned long nr_segments,
  95. struct kexec_segment __user *segments)
  96. {
  97. size_t segment_bytes;
  98. struct kimage *image;
  99. unsigned long i;
  100. int result;
  101. /* Allocate a controlling structure */
  102. result = -ENOMEM;
  103. image = kzalloc(sizeof(*image), GFP_KERNEL);
  104. if (!image)
  105. goto out;
  106. image->head = 0;
  107. image->entry = &image->head;
  108. image->last_entry = &image->head;
  109. image->control_page = ~0; /* By default this does not apply */
  110. image->start = entry;
  111. image->type = KEXEC_TYPE_DEFAULT;
  112. /* Initialize the list of control pages */
  113. INIT_LIST_HEAD(&image->control_pages);
  114. /* Initialize the list of destination pages */
  115. INIT_LIST_HEAD(&image->dest_pages);
  116. /* Initialize the list of unusable pages */
  117. INIT_LIST_HEAD(&image->unuseable_pages);
  118. /* Read in the segments */
  119. image->nr_segments = nr_segments;
  120. segment_bytes = nr_segments * sizeof(*segments);
  121. result = copy_from_user(image->segment, segments, segment_bytes);
  122. if (result)
  123. goto out;
  124. /*
  125. * Verify we have good destination addresses. The caller is
  126. * responsible for making certain we don't attempt to load
  127. * the new image into invalid or reserved areas of RAM. This
  128. * just verifies it is an address we can use.
  129. *
  130. * Since the kernel does everything in page size chunks, ensure
  131. * the destination addresses are page aligned. Too many
  132. * special cases crop up when we don't do this. The most
  133. * insidious is getting overlapping destination addresses
  134. * simply because addresses are changed to page size
  135. * granularity.
  136. */
  137. result = -EADDRNOTAVAIL;
  138. for (i = 0; i < nr_segments; i++) {
  139. unsigned long mstart, mend;
  140. mstart = image->segment[i].mem;
  141. mend = mstart + image->segment[i].memsz;
  142. if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
  143. goto out;
  144. if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
  145. goto out;
  146. }
  147. /* Verify our destination addresses do not overlap.
  148. * If we allowed overlapping destination addresses
  149. * through, very weird things can happen with no
  150. * easy explanation as one segment stops on another.
  151. */
  152. result = -EINVAL;
  153. for (i = 0; i < nr_segments; i++) {
  154. unsigned long mstart, mend;
  155. unsigned long j;
  156. mstart = image->segment[i].mem;
  157. mend = mstart + image->segment[i].memsz;
  158. for (j = 0; j < i; j++) {
  159. unsigned long pstart, pend;
  160. pstart = image->segment[j].mem;
  161. pend = pstart + image->segment[j].memsz;
  162. /* Do the segments overlap ? */
  163. if ((mend > pstart) && (mstart < pend))
  164. goto out;
  165. }
  166. }
  167. /* Ensure our buffer sizes are strictly less than
  168. * our memory sizes. This should always be the case,
  169. * and it is easier to check up front than to be surprised
  170. * later on.
  171. */
  172. result = -EINVAL;
  173. for (i = 0; i < nr_segments; i++) {
  174. if (image->segment[i].bufsz > image->segment[i].memsz)
  175. goto out;
  176. }
  177. result = 0;
  178. out:
  179. if (result == 0)
  180. *rimage = image;
  181. else
  182. kfree(image);
  183. return result;
  184. }
  185. static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
  186. unsigned long nr_segments,
  187. struct kexec_segment __user *segments)
  188. {
  189. int result;
  190. struct kimage *image;
  191. /* Allocate and initialize a controlling structure */
  192. image = NULL;
  193. result = do_kimage_alloc(&image, entry, nr_segments, segments);
  194. if (result)
  195. goto out;
  196. *rimage = image;
  197. /*
  198. * Find a location for the control code buffer, and add it to
  199. * the vector of segments so that its pages will also be
  200. * counted as destination pages.
  201. */
  202. result = -ENOMEM;
  203. image->control_code_page = kimage_alloc_control_pages(image,
  204. get_order(KEXEC_CONTROL_CODE_SIZE));
  205. if (!image->control_code_page) {
  206. printk(KERN_ERR "Could not allocate control_code_buffer\n");
  207. goto out;
  208. }
  209. result = 0;
  210. out:
  211. if (result == 0)
  212. *rimage = image;
  213. else
  214. kfree(image);
  215. return result;
  216. }
  217. static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
  218. unsigned long nr_segments,
  219. struct kexec_segment __user *segments)
  220. {
  221. int result;
  222. struct kimage *image;
  223. unsigned long i;
  224. image = NULL;
  225. /* Verify we have a valid entry point */
  226. if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
  227. result = -EADDRNOTAVAIL;
  228. goto out;
  229. }
  230. /* Allocate and initialize a controlling structure */
  231. result = do_kimage_alloc(&image, entry, nr_segments, segments);
  232. if (result)
  233. goto out;
  234. /* Enable the special crash kernel control page
  235. * allocation policy.
  236. */
  237. image->control_page = crashk_res.start;
  238. image->type = KEXEC_TYPE_CRASH;
  239. /*
  240. * Verify we have good destination addresses. Normally
  241. * the caller is responsible for making certain we don't
  242. * attempt to load the new image into invalid or reserved
  243. * areas of RAM. But crash kernels are preloaded into a
  244. * reserved area of ram. We must ensure the addresses
  245. * are in the reserved area otherwise preloading the
  246. * kernel could corrupt things.
  247. */
  248. result = -EADDRNOTAVAIL;
  249. for (i = 0; i < nr_segments; i++) {
  250. unsigned long mstart, mend;
  251. mstart = image->segment[i].mem;
  252. mend = mstart + image->segment[i].memsz - 1;
  253. /* Ensure we are within the crash kernel limits */
  254. if ((mstart < crashk_res.start) || (mend > crashk_res.end))
  255. goto out;
  256. }
  257. /*
  258. * Find a location for the control code buffer, and add it to
  259. * the vector of segments so that its pages will also be
  260. * counted as destination pages.
  261. */
  262. result = -ENOMEM;
  263. image->control_code_page = kimage_alloc_control_pages(image,
  264. get_order(KEXEC_CONTROL_CODE_SIZE));
  265. if (!image->control_code_page) {
  266. printk(KERN_ERR "Could not allocate control_code_buffer\n");
  267. goto out;
  268. }
  269. result = 0;
  270. out:
  271. if (result == 0)
  272. *rimage = image;
  273. else
  274. kfree(image);
  275. return result;
  276. }
  277. static int kimage_is_destination_range(struct kimage *image,
  278. unsigned long start,
  279. unsigned long end)
  280. {
  281. unsigned long i;
  282. for (i = 0; i < image->nr_segments; i++) {
  283. unsigned long mstart, mend;
  284. mstart = image->segment[i].mem;
  285. mend = mstart + image->segment[i].memsz;
  286. if ((end > mstart) && (start < mend))
  287. return 1;
  288. }
  289. return 0;
  290. }
  291. static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
  292. {
  293. struct page *pages;
  294. pages = alloc_pages(gfp_mask, order);
  295. if (pages) {
  296. unsigned int count, i;
  297. pages->mapping = NULL;
  298. set_page_private(pages, order);
  299. count = 1 << order;
  300. for (i = 0; i < count; i++)
  301. SetPageReserved(pages + i);
  302. }
  303. return pages;
  304. }
  305. static void kimage_free_pages(struct page *page)
  306. {
  307. unsigned int order, count, i;
  308. order = page_private(page);
  309. count = 1 << order;
  310. for (i = 0; i < count; i++)
  311. ClearPageReserved(page + i);
  312. __free_pages(page, order);
  313. }
  314. static void kimage_free_page_list(struct list_head *list)
  315. {
  316. struct list_head *pos, *next;
  317. list_for_each_safe(pos, next, list) {
  318. struct page *page;
  319. page = list_entry(pos, struct page, lru);
  320. list_del(&page->lru);
  321. kimage_free_pages(page);
  322. }
  323. }
  324. static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
  325. unsigned int order)
  326. {
  327. /* Control pages are special, they are the intermediaries
  328. * that are needed while we copy the rest of the pages
  329. * to their final resting place. As such they must
  330. * not conflict with either the destination addresses
  331. * or memory the kernel is already using.
  332. *
  333. * The only case where we really need more than one of
  334. * these is for architectures where we cannot disable
  335. * the MMU and must instead generate an identity mapped
  336. * page table for all of the memory.
  337. *
  338. * At worst this runs in O(N) of the image size.
  339. */
  340. struct list_head extra_pages;
  341. struct page *pages;
  342. unsigned int count;
  343. count = 1 << order;
  344. INIT_LIST_HEAD(&extra_pages);
  345. /* Loop while I can allocate a page and the page allocated
  346. * is a destination page.
  347. */
  348. do {
  349. unsigned long pfn, epfn, addr, eaddr;
  350. pages = kimage_alloc_pages(GFP_KERNEL, order);
  351. if (!pages)
  352. break;
  353. pfn = page_to_pfn(pages);
  354. epfn = pfn + count;
  355. addr = pfn << PAGE_SHIFT;
  356. eaddr = epfn << PAGE_SHIFT;
  357. if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
  358. kimage_is_destination_range(image, addr, eaddr)) {
  359. list_add(&pages->lru, &extra_pages);
  360. pages = NULL;
  361. }
  362. } while (!pages);
  363. if (pages) {
  364. /* Remember the allocated page... */
  365. list_add(&pages->lru, &image->control_pages);
  366. * Because the page is already in its destination
  367. * location we will never allocate another page at
  368. * that address. Therefore kimage_alloc_pages
  369. * will not return it (again) and we don't need
  370. * to give it an entry in image->segment[].
  371. */
  372. }
  373. /* Deal with the destination pages I have inadvertently allocated.
  374. *
  375. * Ideally I would convert multi-page allocations into single
  376. * page allocations, and add everything to image->dest_pages.
  377. *
  378. * For now it is simpler to just free the pages.
  379. */
  380. kimage_free_page_list(&extra_pages);
  381. return pages;
  382. }
  383. static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
  384. unsigned int order)
  385. {
  386. /* Control pages are special, they are the intermediaries
  387. * that are needed while we copy the rest of the pages
  388. * to their final resting place. As such they must
  389. * not conflict with either the destination addresses
  390. * or memory the kernel is already using.
  391. *
  392. * Control pages are also the only pages we must allocate
  393. * when loading a crash kernel. All of the other pages
  394. * are specified by the segments and we just memcpy
  395. * into them directly.
  396. *
  397. * The only case where we really need more than one of
  398. * these is for architectures where we cannot disable
  399. * the MMU and must instead generate an identity mapped
  400. * page table for all of the memory.
  401. *
  402. * Given the low demand this implements a very simple
  403. * allocator that finds the first hole of the appropriate
  404. * size in the reserved memory region, and allocates all
  405. * of the memory up to and including the hole.
  406. */
  407. unsigned long hole_start, hole_end, size;
  408. struct page *pages;
  409. pages = NULL;
  410. size = (1 << order) << PAGE_SHIFT;
  411. hole_start = (image->control_page + (size - 1)) & ~(size - 1);
  412. hole_end = hole_start + size - 1;
  413. while (hole_end <= crashk_res.end) {
  414. unsigned long i;
  415. if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
  416. break;
  417. if (hole_end > crashk_res.end)
  418. break;
  419. /* See if I overlap any of the segments */
  420. for (i = 0; i < image->nr_segments; i++) {
  421. unsigned long mstart, mend;
  422. mstart = image->segment[i].mem;
  423. mend = mstart + image->segment[i].memsz - 1;
  424. if ((hole_end >= mstart) && (hole_start <= mend)) {
  425. /* Advance the hole to the end of the segment */
  426. hole_start = (mend + (size - 1)) & ~(size - 1);
  427. hole_end = hole_start + size - 1;
  428. break;
  429. }
  430. }
  431. /* If I don't overlap any segments I have found my hole! */
  432. if (i == image->nr_segments) {
  433. pages = pfn_to_page(hole_start >> PAGE_SHIFT);
  434. break;
  435. }
  436. }
  437. if (pages)
  438. image->control_page = hole_end;
  439. return pages;
  440. }
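The hole search above depends on size being a power of two (one or more whole pages), so the expression (x + (size - 1)) & ~(size - 1) rounds x up to the next size-aligned boundary. A tiny standalone sketch of that arithmetic, with a hypothetical example address:

#include <stdio.h>

/* Round x up to the next multiple of size; size must be a power of two.
 * This is the same expression the crash control-page hole search uses for
 * hole_start, and for skipping past a segment it overlaps. */
static unsigned long round_up_pow2(unsigned long x, unsigned long size)
{
	return (x + (size - 1)) & ~(size - 1);
}

int main(void)
{
	unsigned long size = (1UL << 1) << 12;	/* order 1 with 4 KiB pages: 0x2000 */

	/* 0x01003000 is not 0x2000-aligned, so it rounds up to 0x01004000. */
	printf("%#lx\n", round_up_pow2(0x01003000UL, size));
	return 0;
}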
  441. struct page *kimage_alloc_control_pages(struct kimage *image,
  442. unsigned int order)
  443. {
  444. struct page *pages = NULL;
  445. switch (image->type) {
  446. case KEXEC_TYPE_DEFAULT:
  447. pages = kimage_alloc_normal_control_pages(image, order);
  448. break;
  449. case KEXEC_TYPE_CRASH:
  450. pages = kimage_alloc_crash_control_pages(image, order);
  451. break;
  452. }
  453. return pages;
  454. }
  455. static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
  456. {
  457. if (*image->entry != 0)
  458. image->entry++;
  459. if (image->entry == image->last_entry) {
  460. kimage_entry_t *ind_page;
  461. struct page *page;
  462. page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
  463. if (!page)
  464. return -ENOMEM;
  465. ind_page = page_address(page);
  466. *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
  467. image->entry = ind_page;
  468. image->last_entry = ind_page +
  469. ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
  470. }
  471. *image->entry = entry;
  472. image->entry++;
  473. *image->entry = 0;
  474. return 0;
  475. }
  476. static int kimage_set_destination(struct kimage *image,
  477. unsigned long destination)
  478. {
  479. int result;
  480. destination &= PAGE_MASK;
  481. result = kimage_add_entry(image, destination | IND_DESTINATION);
  482. if (result == 0)
  483. image->destination = destination;
  484. return result;
  485. }
  486. static int kimage_add_page(struct kimage *image, unsigned long page)
  487. {
  488. int result;
  489. page &= PAGE_MASK;
  490. result = kimage_add_entry(image, page | IND_SOURCE);
  491. if (result == 0)
  492. image->destination += PAGE_SIZE;
  493. return result;
  494. }
  495. static void kimage_free_extra_pages(struct kimage *image)
  496. {
  497. /* Walk through and free any extra destination pages I may have */
  498. kimage_free_page_list(&image->dest_pages);
  499. /* Walk through and free any unusable pages I have cached */
  500. kimage_free_page_list(&image->unuseable_pages);
  501. }
  502. static int kimage_terminate(struct kimage *image)
  503. {
  504. if (*image->entry != 0)
  505. image->entry++;
  506. *image->entry = IND_DONE;
  507. return 0;
  508. }
  509. #define for_each_kimage_entry(image, ptr, entry) \
  510. for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
  511. ptr = (entry & IND_INDIRECTION)? \
  512. phys_to_virt((entry & PAGE_MASK)): ptr +1)
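For reference, the macro above follows IND_INDIRECTION links transparently, so callers see one flat stream of entries regardless of how many indirection pages were chained together. A hypothetical helper (not in the original file) that counts the source pages queued for copying would use it like this:

/* Hypothetical example, not part of kexec.c: count the IND_SOURCE entries,
 * i.e. the number of pages that will be copied when the image is executed. */
static unsigned long kimage_count_source_pages(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long count = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_SOURCE)
			count++;
	}
	return count;
}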
  513. static void kimage_free_entry(kimage_entry_t entry)
  514. {
  515. struct page *page;
  516. page = pfn_to_page(entry >> PAGE_SHIFT);
  517. kimage_free_pages(page);
  518. }
  519. static void kimage_free(struct kimage *image)
  520. {
  521. kimage_entry_t *ptr, entry;
  522. kimage_entry_t ind = 0;
  523. if (!image)
  524. return;
  525. kimage_free_extra_pages(image);
  526. for_each_kimage_entry(image, ptr, entry) {
  527. if (entry & IND_INDIRECTION) {
  528. /* Free the previous indirection page */
  529. if (ind & IND_INDIRECTION)
  530. kimage_free_entry(ind);
  531. /* Save this indirection page until we are
  532. * done with it.
  533. */
  534. ind = entry;
  535. }
  536. else if (entry & IND_SOURCE)
  537. kimage_free_entry(entry);
  538. }
  539. /* Free the final indirection page */
  540. if (ind & IND_INDIRECTION)
  541. kimage_free_entry(ind);
  542. /* Handle any machine specific cleanup */
  543. machine_kexec_cleanup(image);
  544. /* Free the kexec control pages... */
  545. kimage_free_page_list(&image->control_pages);
  546. kfree(image);
  547. }
  548. static kimage_entry_t *kimage_dst_used(struct kimage *image,
  549. unsigned long page)
  550. {
  551. kimage_entry_t *ptr, entry;
  552. unsigned long destination = 0;
  553. for_each_kimage_entry(image, ptr, entry) {
  554. if (entry & IND_DESTINATION)
  555. destination = entry & PAGE_MASK;
  556. else if (entry & IND_SOURCE) {
  557. if (page == destination)
  558. return ptr;
  559. destination += PAGE_SIZE;
  560. }
  561. }
  562. return NULL;
  563. }
  564. static struct page *kimage_alloc_page(struct kimage *image,
  565. gfp_t gfp_mask,
  566. unsigned long destination)
  567. {
  568. /*
  569. * Here we implement safeguards to ensure that a source page
  570. * is not copied to its destination page before the data on
  571. * the destination page is no longer useful.
  572. *
  573. * To do this we maintain the invariant that a source page is
  574. * either its own destination page, or it is not a
  575. * destination page at all.
  576. *
  577. * That is slightly stronger than required, but the proof
  578. * that no problems will occur is trivial, and the
  579. * implementation is simple to verify.
  580. *
  581. * When allocating all pages normally this algorithm will run
  582. * in O(N) time, but in the worst case it will run in O(N^2)
  583. * time. If the runtime is a problem the data structures can
  584. * be fixed.
  585. */
  586. struct page *page;
  587. unsigned long addr;
  588. /*
  589. * Walk through the list of destination pages, and see if I
  590. * have a match.
  591. */
  592. list_for_each_entry(page, &image->dest_pages, lru) {
  593. addr = page_to_pfn(page) << PAGE_SHIFT;
  594. if (addr == destination) {
  595. list_del(&page->lru);
  596. return page;
  597. }
  598. }
  599. page = NULL;
  600. while (1) {
  601. kimage_entry_t *old;
  602. /* Allocate a page; if we run out of memory, give up */
  603. page = kimage_alloc_pages(gfp_mask, 0);
  604. if (!page)
  605. return NULL;
  606. /* If the page cannot be used, file it away */
  607. if (page_to_pfn(page) >
  608. (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
  609. list_add(&page->lru, &image->unuseable_pages);
  610. continue;
  611. }
  612. addr = page_to_pfn(page) << PAGE_SHIFT;
  613. /* If it is the destination page we want, use it */
  614. if (addr == destination)
  615. break;
  616. /* If the page is not a destination page use it */
  617. if (!kimage_is_destination_range(image, addr,
  618. addr + PAGE_SIZE))
  619. break;
  620. /*
  621. * I know that the page is someone's destination page.
  622. * See if there is already a source page for this
  623. * destination page. And if so swap the source pages.
  624. */
  625. old = kimage_dst_used(image, addr);
  626. if (old) {
  627. /* If so move it */
  628. unsigned long old_addr;
  629. struct page *old_page;
  630. old_addr = *old & PAGE_MASK;
  631. old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
  632. copy_highpage(page, old_page);
  633. *old = addr | (*old & ~PAGE_MASK);
  634. /* The old page I have found cannot be a
  635. * destination page, so return it.
  636. */
  637. addr = old_addr;
  638. page = old_page;
  639. break;
  640. }
  641. else {
  642. /* Place the page on the destination list; I
  643. * will use it later.
  644. */
  645. list_add(&page->lru, &image->dest_pages);
  646. }
  647. }
  648. return page;
  649. }
  650. static int kimage_load_normal_segment(struct kimage *image,
  651. struct kexec_segment *segment)
  652. {
  653. unsigned long maddr;
  654. unsigned long ubytes, mbytes;
  655. int result;
  656. unsigned char __user *buf;
  657. result = 0;
  658. buf = segment->buf;
  659. ubytes = segment->bufsz;
  660. mbytes = segment->memsz;
  661. maddr = segment->mem;
  662. result = kimage_set_destination(image, maddr);
  663. if (result < 0)
  664. goto out;
  665. while (mbytes) {
  666. struct page *page;
  667. char *ptr;
  668. size_t uchunk, mchunk;
  669. page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
  670. if (!page) {
  671. result = -ENOMEM;
  672. goto out;
  673. }
  674. result = kimage_add_page(image, page_to_pfn(page)
  675. << PAGE_SHIFT);
  676. if (result < 0)
  677. goto out;
  678. ptr = kmap(page);
  679. /* Start with a clear page */
  680. memset(ptr, 0, PAGE_SIZE);
  681. ptr += maddr & ~PAGE_MASK;
  682. mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
  683. if (mchunk > mbytes)
  684. mchunk = mbytes;
  685. uchunk = mchunk;
  686. if (uchunk > ubytes)
  687. uchunk = ubytes;
  688. result = copy_from_user(ptr, buf, uchunk);
  689. kunmap(page);
  690. if (result) {
  691. result = (result < 0) ? result : -EIO;
  692. goto out;
  693. }
  694. ubytes -= uchunk;
  695. maddr += mchunk;
  696. buf += mchunk;
  697. mbytes -= mchunk;
  698. }
  699. out:
  700. return result;
  701. }
  702. static int kimage_load_crash_segment(struct kimage *image,
  703. struct kexec_segment *segment)
  704. {
  705. /* For crash dump kernels we simply copy the data from
  706. * user space to its destination.
  707. * We do things a page at a time for the sake of kmap.
  708. */
  709. unsigned long maddr;
  710. unsigned long ubytes, mbytes;
  711. int result;
  712. unsigned char __user *buf;
  713. result = 0;
  714. buf = segment->buf;
  715. ubytes = segment->bufsz;
  716. mbytes = segment->memsz;
  717. maddr = segment->mem;
  718. while (mbytes) {
  719. struct page *page;
  720. char *ptr;
  721. size_t uchunk, mchunk;
  722. page = pfn_to_page(maddr >> PAGE_SHIFT);
  723. if (!page) {
  724. result = -ENOMEM;
  725. goto out;
  726. }
  727. ptr = kmap(page);
  728. ptr += maddr & ~PAGE_MASK;
  729. mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
  730. if (mchunk > mbytes)
  731. mchunk = mbytes;
  732. uchunk = mchunk;
  733. if (uchunk > ubytes) {
  734. uchunk = ubytes;
  735. /* Zero the trailing part of the page */
  736. memset(ptr + uchunk, 0, mchunk - uchunk);
  737. }
  738. result = copy_from_user(ptr, buf, uchunk);
  739. kexec_flush_icache_page(page);
  740. kunmap(page);
  741. if (result) {
  742. result = (result < 0) ? result : -EIO;
  743. goto out;
  744. }
  745. ubytes -= uchunk;
  746. maddr += mchunk;
  747. buf += mchunk;
  748. mbytes -= mchunk;
  749. }
  750. out:
  751. return result;
  752. }
  753. static int kimage_load_segment(struct kimage *image,
  754. struct kexec_segment *segment)
  755. {
  756. int result = -ENOMEM;
  757. switch (image->type) {
  758. case KEXEC_TYPE_DEFAULT:
  759. result = kimage_load_normal_segment(image, segment);
  760. break;
  761. case KEXEC_TYPE_CRASH:
  762. result = kimage_load_crash_segment(image, segment);
  763. break;
  764. }
  765. return result;
  766. }
  767. /*
  768. * Exec Kernel system call: for obvious reasons only root may call it.
  769. *
  770. * This call breaks up into three pieces.
  771. * - A generic part which loads the new kernel from the current
  772. * address space, and very carefully places the data in the
  773. * allocated pages.
  774. *
  775. * - A generic part that interacts with the kernel and tells all of
  776. * the devices to shut down, preventing ongoing DMAs, and placing
  777. * the devices in a consistent state so a later kernel can
  778. * reinitialize them.
  779. *
  780. * - A machine specific part that includes the syscall number
  781. * and then copies the image to its final destination and
  782. * jumps into the image at entry.
  783. *
  784. * kexec does not sync or unmount filesystems, so if you need
  785. * that to happen you need to do it yourself.
  786. */
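For orientation, the raw user-space interface is just this syscall: an entry point, a segment count, an array of kexec_segment descriptors, and the flags checked below. A minimal sketch of a caller follows; the structure layout and the KEXEC_ARCH_DEFAULT value are assumed to match this kernel's <linux/kexec.h>, SYS_kexec_load is assumed to be exposed by the C library, and the payload is obviously not a real kernel image. Without CAP_SYS_BOOT the call is simply expected to fail with EPERM.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Assumed to mirror the user-space view of struct kexec_segment. */
struct kexec_segment {
	const void *buf;	/* source buffer in user space           */
	size_t bufsz;		/* bytes to copy out of buf              */
	const void *mem;	/* physical destination, page aligned    */
	size_t memsz;		/* destination size, page aligned        */
};

#ifndef KEXEC_ARCH_DEFAULT
#define KEXEC_ARCH_DEFAULT 0	/* "same architecture as the running kernel" */
#endif

int main(void)
{
	static char payload[4096];		/* stand-in for a loaded segment */
	struct kexec_segment seg = {
		.buf   = payload,
		.bufsz = sizeof(payload),
		.mem   = (void *)0x100000,	/* example physical load address */
		.memsz = sizeof(payload),
	};

	long ret = syscall(SYS_kexec_load, 0x100000UL, 1UL, &seg,
			   (unsigned long)KEXEC_ARCH_DEFAULT);
	if (ret)
		perror("kexec_load");	/* EPERM without CAP_SYS_BOOT */
	return ret ? 1 : 0;
}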
  787. struct kimage *kexec_image;
  788. struct kimage *kexec_crash_image;
  789. /*
  790. * A home-grown binary mutex.
  791. * Nothing can wait, so this mutex is safe to use
  792. * in interrupt context :)
  793. */
  794. static int kexec_lock;
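The lock is taken and released with xchg(), which atomically installs the new value and returns the previous one, so a non-zero return from the "take" means another path already holds the lock and the caller must simply back off. A hypothetical condensed version of the pattern used by sys_kexec_load() and crash_kexec() below:

/* Hypothetical example, not part of kexec.c: the kexec_lock usage pattern.
 * xchg() returns the old value; seeing 1 means the lock was already held. */
static int kexec_trylock_pattern(void)
{
	if (xchg(&kexec_lock, 1))
		return -EBUSY;		/* someone else holds the lock */
	/* ... critical section ... */
	BUG_ON(!xchg(&kexec_lock, 0));	/* release; it must have been held */
	return 0;
}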
  795. asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
  796. struct kexec_segment __user *segments,
  797. unsigned long flags)
  798. {
  799. struct kimage **dest_image, *image;
  800. int locked;
  801. int result;
  802. /* We only trust the superuser with rebooting the system. */
  803. if (!capable(CAP_SYS_BOOT))
  804. return -EPERM;
  805. /*
  806. * Verify we have a legal set of flags
  807. * This leaves us room for future extensions.
  808. */
  809. if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
  810. return -EINVAL;
  811. /* Verify we are on the appropriate architecture */
  812. if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
  813. ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
  814. return -EINVAL;
  815. /* Put an artificial cap on the number
  816. * of segments passed to kexec_load.
  817. */
  818. if (nr_segments > KEXEC_SEGMENT_MAX)
  819. return -EINVAL;
  820. image = NULL;
  821. result = 0;
  822. /* Because we write directly to the reserved memory
  823. * region when loading crash kernels we need a mutex here to
  824. * prevent multiple crash kernels from attempting to load
  825. * simultaneously, and to prevent a crash kernel from loading
  826. * over the top of an in-use crash kernel.
  827. *
  828. * KISS: always take the mutex.
  829. */
  830. locked = xchg(&kexec_lock, 1);
  831. if (locked)
  832. return -EBUSY;
  833. dest_image = &kexec_image;
  834. if (flags & KEXEC_ON_CRASH)
  835. dest_image = &kexec_crash_image;
  836. if (nr_segments > 0) {
  837. unsigned long i;
  838. /* Loading another kernel to reboot into */
  839. if ((flags & KEXEC_ON_CRASH) == 0)
  840. result = kimage_normal_alloc(&image, entry,
  841. nr_segments, segments);
  842. /* Loading another kernel to switch to if this one crashes */
  843. else if (flags & KEXEC_ON_CRASH) {
  844. /* Free any current crash dump kernel before
  845. * we corrupt it.
  846. */
  847. kimage_free(xchg(&kexec_crash_image, NULL));
  848. result = kimage_crash_alloc(&image, entry,
  849. nr_segments, segments);
  850. }
  851. if (result)
  852. goto out;
  853. result = machine_kexec_prepare(image);
  854. if (result)
  855. goto out;
  856. for (i = 0; i < nr_segments; i++) {
  857. result = kimage_load_segment(image, &image->segment[i]);
  858. if (result)
  859. goto out;
  860. }
  861. result = kimage_terminate(image);
  862. if (result)
  863. goto out;
  864. }
  865. /* Install the new kernel, and uninstall the old */
  866. image = xchg(dest_image, image);
  867. out:
  868. locked = xchg(&kexec_lock, 0); /* Release the mutex */
  869. BUG_ON(!locked);
  870. kimage_free(image);
  871. return result;
  872. }
  873. #ifdef CONFIG_COMPAT
  874. asmlinkage long compat_sys_kexec_load(unsigned long entry,
  875. unsigned long nr_segments,
  876. struct compat_kexec_segment __user *segments,
  877. unsigned long flags)
  878. {
  879. struct compat_kexec_segment in;
  880. struct kexec_segment out, __user *ksegments;
  881. unsigned long i, result;
  882. /* Don't allow clients that don't understand the native
  883. * architecture to do anything.
  884. */
  885. if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
  886. return -EINVAL;
  887. if (nr_segments > KEXEC_SEGMENT_MAX)
  888. return -EINVAL;
  889. ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
  890. for (i=0; i < nr_segments; i++) {
  891. result = copy_from_user(&in, &segments[i], sizeof(in));
  892. if (result)
  893. return -EFAULT;
  894. out.buf = compat_ptr(in.buf);
  895. out.bufsz = in.bufsz;
  896. out.mem = in.mem;
  897. out.memsz = in.memsz;
  898. result = copy_to_user(&ksegments[i], &out, sizeof(out));
  899. if (result)
  900. return -EFAULT;
  901. }
  902. return sys_kexec_load(entry, nr_segments, ksegments, flags);
  903. }
  904. #endif
  905. void crash_kexec(struct pt_regs *regs)
  906. {
  907. int locked;
  908. /* Take the kexec_lock here to prevent sys_kexec_load
  909. * running on one cpu from replacing the crash kernel
  910. * we are using after a panic on a different cpu.
  911. *
  912. * If the crash kernel was not located in a fixed area
  913. * of memory the xchg(&kexec_crash_image) would be
  914. * sufficient. But since I reuse the memory...
  915. */
  916. locked = xchg(&kexec_lock, 1);
  917. if (!locked) {
  918. if (kexec_crash_image) {
  919. struct pt_regs fixed_regs;
  920. crash_setup_regs(&fixed_regs, regs);
  921. machine_crash_shutdown(&fixed_regs);
  922. machine_kexec(kexec_crash_image);
  923. }
  924. locked = xchg(&kexec_lock, 0);
  925. BUG_ON(!locked);
  926. }
  927. }
  928. static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
  929. size_t data_len)
  930. {
  931. struct elf_note note;
  932. note.n_namesz = strlen(name) + 1;
  933. note.n_descsz = data_len;
  934. note.n_type = type;
  935. memcpy(buf, &note, sizeof(note));
  936. buf += (sizeof(note) + 3)/4;
  937. memcpy(buf, name, note.n_namesz);
  938. buf += (note.n_namesz + 3)/4;
  939. memcpy(buf, data, note.n_descsz);
  940. buf += (note.n_descsz + 3)/4;
  941. return buf;
  942. }
  943. static void final_note(u32 *buf)
  944. {
  945. struct elf_note note;
  946. note.n_namesz = 0;
  947. note.n_descsz = 0;
  948. note.n_type = 0;
  949. memcpy(buf, &note, sizeof(note));
  950. }
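Each note emitted above is an Elf note header followed by the name and the payload, with every piece padded to a four-byte boundary by the u32 pointer arithmetic. A hypothetical helper (not in the original file) that computes the space one append_elf_note() call consumes makes that rounding explicit:

/* Hypothetical example, not part of kexec.c: bytes used in the note buffer
 * by one append_elf_note() call; header, name and payload are each rounded
 * up to a multiple of 4, matching the u32 pointer steps above. */
static size_t elf_note_bytes(const char *name, size_t data_len)
{
	size_t namesz = strlen(name) + 1;

	return 4 * ((sizeof(struct elf_note) + 3) / 4) +
	       4 * ((namesz + 3) / 4) +
	       4 * ((data_len + 3) / 4);
}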
  951. void crash_save_cpu(struct pt_regs *regs, int cpu)
  952. {
  953. struct elf_prstatus prstatus;
  954. u32 *buf;
  955. if ((cpu < 0) || (cpu >= NR_CPUS))
  956. return;
  957. /* Using ELF notes here is opportunistic.
  958. * I need a well defined structure format
  959. * for the data I pass, and I need tags
  960. * on the data to indicate what information I have
  961. * squirrelled away. ELF notes happen to provide
  962. * all of that, so there is no need to invent something new.
  963. */
  964. buf = (u32*)per_cpu_ptr(crash_notes, cpu);
  965. if (!buf)
  966. return;
  967. memset(&prstatus, 0, sizeof(prstatus));
  968. prstatus.pr_pid = current->pid;
  969. elf_core_copy_regs(&prstatus.pr_reg, regs);
  970. buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
  971. sizeof(prstatus));
  972. final_note(buf);
  973. }
  974. static int __init crash_notes_memory_init(void)
  975. {
  976. /* Allocate memory for saving cpu registers. */
  977. crash_notes = alloc_percpu(note_buf_t);
  978. if (!crash_notes) {
  979. printk("Kexec: Memory allocation for saving cpu register"
  980. " states failed\n");
  981. return -ENOMEM;
  982. }
  983. return 0;
  984. }
  985. module_init(crash_notes_memory_init)