// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static const char * const bch_reada_cache_policies[] = {
	"all",
	"meta-only",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

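/*
 * read_attribute()/write_attribute()/rw_attribute() are helper macros
 * from bcache's sysfs.h: each declares a static struct attribute named
 * sysfs_<name> with the corresponding permission bits. The SHOW() and
 * STORE() handlers below dispatch on those objects.
 */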
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);
read_attribute(feature_compat);
read_attribute(feature_ro_compat);
read_attribute(feature_incompat);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);

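/*
 * Print a NULL-terminated string list into a sysfs buffer, marking the
 * selected entry with square brackets, e.g. "writethrough [writeback]
 * writearound none". The final trailing space is replaced by a newline.
 */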
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

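/*
 * Show handler for a cached (backing) device. The sysfs_print(),
 * sysfs_printf() and var_*() helpers are macros from sysfs.h that
 * return from this function as soon as @attr matches the named
 * attribute.
 */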
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)	(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_readahead_cache_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_reada_cache_policies,
					       dc->cache_readahead_policy);

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit, "%i", dc->error_limit);
	sysfs_printf(io_disable, "%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* terminate right after the copied label, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

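/*
 * Store handler for a cached device. The sysfs_strtoul*() helper macros
 * parse the user buffer and assign the result in place; attributes with
 * side effects (cache_mode, label, attach, ...) are handled explicitly.
 */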
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf)) {
		v = bch_cached_dev_run(dc);
		if (v)
			return v;
	}

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_readahead_cache_policy) {
		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != dc->cache_readahead_policy)
			dc->cache_readahead_policy = v;
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}

		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found\n", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

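/*
 * Locked wrapper around __cached_dev_store(): it takes bch_register_lock,
 * then wakes the writeback kthread or schedules the rate update worker
 * when the attribute just written requires it.
 */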
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * Reject setting it to 1 via sysfs if the writeback
			 * kthread is not created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread\n",
				       dc->disk.disk->disk_name);
			}
		} else
			/*
			 * The writeback kthread will check whether
			 * dc->writeback_running is true or false.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
	 * to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_readahead_cache_policy,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* terminate right after the copied label, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);
		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

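/*
 * bset_tree_stats support: bch_btree_map_nodes() visits every btree node
 * and calls bch_btree_bset_stats() on each one, accumulating per-bset
 * statistics into a bset_stats_op on the caller's stack.
 */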
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

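/*
 * Estimate how full the root btree node is: take a read lock on the root
 * (retrying if the root changed while we waited for the lock), sum the
 * bytes of all good keys in it, and express that as a percentage of a
 * full btree node.
 */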
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

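/* Total bytes of memory currently held by the in-memory btree node cache. */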
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

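/*
 * Length of the longest chain in the btree cache hash table: a gauge
 * of how unevenly bucket_hash is loaded.
 */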
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

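/* Percentage of btree node space occupied by keys, per the last GC's stats. */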
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

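/*
 * Average data per key from the last GC's stats (gc_stats.data appears
 * to be in KEY_SIZE() units, i.e. sectors).
 */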
static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c->cache));
	sysfs_hprint(block_size, block_bytes(c->cache));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(reclaimed_journal_buckets,
		    atomic_long_read(&c->reclaimed_journal_buckets));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(idle_max_writeback_rate, "%i",
		     c->idle_max_writeback_rate_enabled);
	sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	if (attr == &sysfs_feature_compat)
		return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_ro_compat)
		return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_incompat)
		return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->cache->sb)) {
			SET_CACHE_SYNC(&c->cache->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set\n");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify, c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
	sysfs_strtoul_bool(idle_max_writeback_rate,
			   c->idle_max_writeback_rate_enabled);

	/*
	 * Writing gc_after_writeback here may overwrite an already set
	 * BCH_DO_AUTO_GC flag; that doesn't matter, because the flag will
	 * be set again on the next chance.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

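/*
 * The "internal" directory is a second kobject embedded in struct
 * cache_set; its show/store handlers simply redirect to the cache
 * set's main attribute handlers above.
 */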
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_reclaimed_journal_buckets,
	&sysfs_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_idle_max_writeback_rate,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	&sysfs_feature_compat,
	&sysfs_feature_ro_compat,
	&sysfs_feature_incompat,
	NULL
};
KTYPE(bch_cache_set_internal);

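/*
 * Comparator for sorting bucket priorities: the reversed operands give
 * a descending sort, and cond_resched() keeps the potentially huge sort
 * in the priority_stats handler from monopolizing the CPU.
 */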
static int __bch_cache_cmp(const void *l, const void *r)
{
	cond_resched();
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

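	/*
	 * priority_stats: walk every bucket once under bucket_lock to
	 * classify it (unused/clean/dirty/metadata), then sort a snapshot
	 * of the bucket priorities to report the average and 31 quantiles
	 * of how recently cached buckets have been used.
	 */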
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);