// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is 2^n - 1, where 2^n is the smallest power of two
 * that is >= foo (i.e. foo rounded up to a power of two, minus one).
 */
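
/*
 * For example: pg_num = 12 gives calc_bits_of(11) = 4 and
 * pg_num_mask = (1 << 4) - 1 = 0xf; pg_num = 16 gives calc_bits_of(15) = 4
 * and pg_num_mask = 0xf as well, i.e. exactly pg_num - 1 for powers of two.
 */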

static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}

/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;

	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
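
/*
 * Unlike straw buckets, which carry a precomputed "straw" value alongside
 * each item weight (two u32s per item on the wire), straw2 buckets are
 * fully derived from the item weights and decode a single u32 per item.
 */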

struct crush_name_node {
	struct rb_node cn_node;
	int cn_id;
	char cn_name[];
};

static struct crush_name_node *alloc_crush_name(size_t name_len)
{
	struct crush_name_node *cn;

	cn = kmalloc(sizeof(*cn) + name_len + 1, GFP_NOIO);
	if (!cn)
		return NULL;

	RB_CLEAR_NODE(&cn->cn_node);
	return cn;
}

static void free_crush_name(struct crush_name_node *cn)
{
	WARN_ON(!RB_EMPTY_NODE(&cn->cn_node));

	kfree(cn);
}
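
/*
 * DEFINE_RB_FUNCS() (from linux/ceph/libceph.h) generates the rbtree
 * helpers used below, such as __insert_crush_name() and
 * erase_crush_name(), keyed on cn_id.
 */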
DEFINE_RB_FUNCS(crush_name, struct crush_name_node, cn_id, cn_node)

static int decode_crush_names(void **p, void *end, struct rb_root *root)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct crush_name_node *cn;
		int id;
		u32 name_len;

		ceph_decode_32_safe(p, end, id, e_inval);
		ceph_decode_32_safe(p, end, name_len, e_inval);
		ceph_decode_need(p, end, name_len, e_inval);

		cn = alloc_crush_name(name_len);
		if (!cn)
			return -ENOMEM;

		cn->cn_id = id;
		memcpy(cn->cn_name, *p, name_len);
		cn->cn_name[name_len] = '\0';
		*p += name_len;

		if (!__insert_crush_name(root, cn)) {
			free_crush_name(cn);
			return -EEXIST;
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

void clear_crush_names(struct rb_root *root)
{
	while (!RB_EMPTY_ROOT(root)) {
		struct crush_name_node *cn =
		    rb_entry(rb_first(root), struct crush_name_node, cn_node);

		erase_crush_name(root, cn);
		free_crush_name(cn);
	}
}

static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node);

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}
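
/*
 * decode_array_32_alloc() returns NULL (with *plen == 0) for an empty
 * array and an ERR_PTR() on failure, so callers must check with
 * IS_ERR() rather than against NULL.
 */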

/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
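
/*
 * On the wire, choose_args is a map of maps: choose_args_index (u64) ->
 * { bucket_index (u32) -> crush_choose_arg }, with at most one entry per
 * bucket of the crush map.
 */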

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}

static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}

		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}
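
/*
 * The working_size computed above is the per-map scratch area that
 * crush_work_size() and crush_init_workspace() later use to carve out a
 * crush_work for each mapping computation (see alloc_workspace() below).
 */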

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->type_names = RB_ROOT;
	c->names = RB_ROOT;
	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
		c->rules[i] = r;
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	err = decode_crush_names(p, end, &c->type_names);
	if (err)
		goto fail;

	err = decode_crush_names(p, end, &c->names);
	if (err)
		goto fail;

	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}
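
/*
 * Note that the tunables section above uses "done" rather than "bad" as
 * the ceph_decode_need() failure label: running out of bytes there just
 * means an older map that predates the tunable, so decoding finishes
 * with the defaults set at the top of crush_decode().
 */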

int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}

static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

/*
 * rbtree of pg pool info
 */
DEFINE_RB_FUNCS(pg_pool, struct ceph_pg_pool_info, id, node)

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = lookup_pg_pool(&map->pg_pools, id);
	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);

		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	pi = lookup_pg_pool(&map->pg_pools, id);
	return pi ? pi->flags : 0;
}
EXPORT_SYMBOL(ceph_pg_pool_flags);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	erase_pg_pool(root, pi);
	kfree(pi->name);
	kfree(pi);
}
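
/*
 * In decode_pool() below, ev is the encoding version of the incoming
 * ceph_pg_pool and cv is the oldest version a decoder must understand
 * to parse it. Anything past the fields we know about is skipped via
 * pool_end, so newer encodings remain compatible.
 */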
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;      /* snapid key */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */
	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}

static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * CRUSH workspaces
 *
 * workspace_manager framework borrowed from fs/btrfs/compression.c.
 * Two simplifications: there is only one type of workspace and there
 * is always at least one workspace.
 */
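/*
 * Typical lifecycle (a sketch; the exact crush_do_rule() argument list
 * is abbreviated here):
 *
 *	struct crush_work *work;
 *
 *	work = get_workspace(&map->crush_wsm, map->crush);
 *	crush_do_rule(map->crush, ruleno, x, result, ..., work);
 *	put_workspace(&map->crush_wsm, work);
 */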
static struct crush_work *alloc_workspace(const struct crush_map *c)
{
	struct crush_work *work;
	size_t work_size;

	WARN_ON(!c->working_size);
	work_size = crush_work_size(c, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);

	work = ceph_kvmalloc(work_size, GFP_NOIO);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->item);
	crush_init_workspace(c, work);
	return work;
}

static void free_workspace(struct crush_work *work)
{
	WARN_ON(!list_empty(&work->item));
	kvfree(work);
}

static void init_workspace_manager(struct workspace_manager *wsm)
{
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	wsm->free_ws = 0;
	init_waitqueue_head(&wsm->ws_wait);
}

static void add_initial_workspace(struct workspace_manager *wsm,
				  struct crush_work *work)
{
	WARN_ON(!list_empty(&wsm->idle_ws));

	list_add(&work->item, &wsm->idle_ws);
	atomic_set(&wsm->total_ws, 1);
	wsm->free_ws = 1;
}

static void cleanup_workspace_manager(struct workspace_manager *wsm)
{
	struct crush_work *work;

	while (!list_empty(&wsm->idle_ws)) {
		work = list_first_entry(&wsm->idle_ws, struct crush_work,
					item);
		list_del_init(&work->item);
		free_workspace(work);
	}
	atomic_set(&wsm->total_ws, 0);
	wsm->free_ws = 0;
}

/*
 * Finds an available workspace or allocates a new one. If it's not
 * possible to allocate a new one, waits until there is one.
 */
static struct crush_work *get_workspace(struct workspace_manager *wsm,
					const struct crush_map *c)
{
	struct crush_work *work;
	int cpus = num_online_cpus();

again:
	spin_lock(&wsm->ws_lock);
	if (!list_empty(&wsm->idle_ws)) {
		work = list_first_entry(&wsm->idle_ws, struct crush_work,
					item);
		list_del_init(&work->item);
		wsm->free_ws--;
		spin_unlock(&wsm->ws_lock);
		return work;
	}
	if (atomic_read(&wsm->total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(&wsm->ws_lock);
		prepare_to_wait(&wsm->ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&wsm->total_ws) > cpus && !wsm->free_ws)
			schedule();
		finish_wait(&wsm->ws_wait, &wait);
		goto again;
	}
	atomic_inc(&wsm->total_ws);
	spin_unlock(&wsm->ws_lock);

	work = alloc_workspace(c);
	if (!work) {
		atomic_dec(&wsm->total_ws);
		wake_up(&wsm->ws_wait);

		/*
		 * Do not return the error but go back to waiting. We
		 * have the initial workspace and the CRUSH computation
		 * time is bounded so we will get it eventually.
		 */
		WARN_ON(atomic_read(&wsm->total_ws) < 1);
		goto again;
	}
	return work;
}

/*
 * Puts a workspace back on the list or frees it if we have enough
 * idle ones sitting around.
 */
static void put_workspace(struct workspace_manager *wsm,
			  struct crush_work *work)
{
	spin_lock(&wsm->ws_lock);
	if (wsm->free_ws <= num_online_cpus()) {
		list_add(&work->item, &wsm->idle_ws);
		wsm->free_ws++;
		spin_unlock(&wsm->ws_lock);
		goto wake;
	}
	spin_unlock(&wsm->ws_lock);

	free_workspace(work);
	atomic_dec(&wsm->total_ws);
wake:
	if (wq_has_sleeper(&wsm->ws_wait))
		wake_up(&wsm->ws_wait);
}
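
/*
 * Together, get_workspace() and put_workspace() bound the number of
 * workspaces at roughly one per online CPU: allocations beyond that
 * make callers wait, and excess idle workspaces are freed on put.
 */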

/*
 * osd map
 */
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	init_workspace_manager(&map->crush_wsm);

	return map;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);

	if (map->crush)
		crush_destroy(map->crush);
	cleanup_workspace_manager(&map->crush_wsm);

	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_temp),
			     struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->primary_temp),
			     struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap),
			     struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap_items),
			     struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(rb_first(&map->pg_pools),
			     struct ceph_pg_pool_info, node);

		__remove_pg_pool(&map->pg_pools, pi);
	}
	kvfree(map->osd_state);
	kvfree(map->osd_weight);
	kvfree(map->osd_addr);
	kvfree(map->osd_primary_affinity);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	u32 to_copy;
	int i;

	dout("%s old %u new %u\n", __func__, map->max_osd, max);
	if (max == map->max_osd)
		return 0;

	state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
	weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
	addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
	if (!state || !weight || !addr) {
		kvfree(state);
		kvfree(weight);
		kvfree(addr);
		return -ENOMEM;
	}

	to_copy = min(map->max_osd, max);
	if (map->osd_state) {
		memcpy(state, map->osd_state, to_copy * sizeof(*state));
		memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
		memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
		kvfree(map->osd_state);
		kvfree(map->osd_weight);
		kvfree(map->osd_addr);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)),
					 GFP_NOFS);
		if (!affinity)
			return -ENOMEM;

		memcpy(affinity, map->osd_primary_affinity,
		       to_copy * sizeof(*affinity));
		kvfree(map->osd_primary_affinity);

		map->osd_primary_affinity = affinity;
		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}
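
/*
 * For example, growing a map from max_osd = 4 to max_osd = 8 copies the
 * four existing entries and initializes osds 4..7 as non-existent:
 * state 0, weight CEPH_OSD_OUT, a zeroed address and, if the affinity
 * array is allocated, CEPH_OSD_DEFAULT_PRIMARY_AFFINITY.
 */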

static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	struct crush_work *work;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work = alloc_workspace(crush);
	if (!work) {
		crush_destroy(crush);
		return -ENOMEM;
	}

	if (map->crush)
		crush_destroy(map->crush);
	cleanup_workspace_manager(&map->crush_wsm);
	map->crush = crush;
	add_initial_workspace(&map->crush_wsm, work);
	return 0;
}

#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}

static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			RB_CLEAR_NODE(&pi->node);
			pi->id = pool;

			if (!__insert_pg_pool(&map->pg_pools, pi)) {
				kfree(pi);
				return -EEXIST;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
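
/*
 * In incremental mode any existing mapping for the pgid is dropped
 * first; the decode callback then returns a new mapping to insert, NULL
 * to record a pure removal, or an ERR_PTR(). A NULL @fn (the old
 * "remove" sections) just erases entries.
 */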

static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}

static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);
	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);
	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = ceph_kvmalloc(
		    array_size(map->max_osd,
			       sizeof(*map->osd_primary_affinity)),
		    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kvfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
						       bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(2 * len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}
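
/*
 * pg_upmap replaces the entire CRUSH-computed osd set for a pg (same
 * wire format as pg_temp, hence the reuse of __decode_pg_temp() above),
 * while pg_upmap_items rewrites individual osds via (from, to) pairs.
 */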

/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*((struct_v >= 5 ? sizeof(u32) :
						       sizeof(u8)) +
				       sizeof(*map->osd_weight)), e_inval);
	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;
	for (i = 0; i < map->max_osd; i++) {
		err = ceph_decode_entity_addr(p, end, &map->osd_addr[i]);
		if (err)
			goto bad;
	}

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;
	*p += len;

	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}

/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;
	int i;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	for (i = 0; i < len; ++i) {
		struct ceph_entity_addr addr;

		ceph_decode_skip_32(p, end, e_inval);
		if (ceph_decode_entity_addr(p, end, &addr))
			goto e_inval;
	}

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d weight 0x%x %s\n", osd, w,
			w == CEPH_OSD_IN ? "(in)" :
			(w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;
		int ret;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			pr_info("osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			pr_info("osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		if (ceph_decode_entity_addr(p, end, &addr))
			goto e_inval;
		pr_info("osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, e_inval);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}
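
/*
 * Note: @dest must be initialized -- any pool_ns reference it already
 * holds is dropped before the copy.
 */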
void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);

void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);
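
/*
 * Returns 0 if the name fit into the inline buffer (and was copied
 * there), otherwise returns the length of the name so that the caller
 * can allocate an external buffer.
 */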
static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);

/*
 * osds only
 */
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

/*
 * osds + primary
 */
static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				break;
		}
		if (i == set->size)
			return true;
	}

	return false;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
	dest->size = src->size;
	dest->primary = src->primary;
}
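
/*
 * Returns whether the given PG splits when pg_num is increased from
 * old_pg_num to new_pg_num, i.e. whether one of the new PG ids folds
 * back onto pgid->seed under ceph_stable_mod().  For example, going
 * from pg_num 4 to pg_num 8, seed 1 gains the child 5 (5 & 3 == 1),
 * so PGs with seed 1 split.
 */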
bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}

bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  bool old_recovery_deletes,
			  bool new_recovery_deletes,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise ||
	       old_recovery_deletes != new_recovery_deletes;
}

static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false; /* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true; /* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true; /* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false; /* same primary (tho replicas may have changed) */
}

bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}

/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				 const struct ceph_object_id *oid,
				 const struct ceph_object_locator *oloc,
				 struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf))
			buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL);
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
}

int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	__ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
	return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}
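
/*
 * ceph_stable_mod() folds the full-precision seed onto the actual
 * number of PGs: e.g. with pg_num == 12 (pg_num_mask == 15), a raw
 * seed of 13 maps to 13 & 7 == 5, while a raw seed of 11 is already
 * below pg_num and maps to itself.
 */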

/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}

/*
 * Magic value used for a "default" fallback choose_args, used if the
 * crush_choose_arg_map passed to do_crush() does not exist.  If this
 * also doesn't exist, fall back to canonical weights.
 */
#define CEPH_DEFAULT_CHOOSE_ARGS	-1

static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    s64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	struct crush_work *work;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);
	if (!arg_map)
		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
						CEPH_DEFAULT_CHOOSE_ARGS);

	work = get_workspace(&map->crush_wsm, map->crush);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, work,
			  arg_map ? arg_map->args : NULL);
	put_workspace(&map->crush_wsm, work);

	return r;
}

static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
				    struct ceph_pg_pool_info *pi,
				    struct ceph_osds *set)
{
	int i;

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
	} else {
		/* set dne devices to NONE */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
		}
	}
}

/*
 * Calculate raw set (CRUSH output) for given PG and filter out
 * nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	if (pi->size > ARRAY_SIZE(raw->osds)) {
		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size,
		       ARRAY_SIZE(raw->osds));
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
		       osdmap->osd_weight, osdmap->max_osd, pi->id);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
	remove_nonexistent_osds(osdmap, pi, raw);
}

/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
			const struct ceph_pg *pgid,
			struct ceph_osds *raw)
{
	struct ceph_pg_mapping *pg;
	int i, j;

	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
	if (pg) {
		/* make sure targets aren't marked out */
		for (i = 0; i < pg->pg_upmap.len; i++) {
			int osd = pg->pg_upmap.osds[i];

			if (osd != CRUSH_ITEM_NONE &&
			    osd < osdmap->max_osd &&
			    osdmap->osd_weight[osd] == 0) {
				/* reject/ignore explicit mapping */
				return;
			}
		}
		for (i = 0; i < pg->pg_upmap.len; i++)
			raw->osds[i] = pg->pg_upmap.osds[i];
		raw->size = pg->pg_upmap.len;
		/* check and apply pg_upmap_items, if any */
	}

	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
	if (pg) {
		/*
		 * Note: this approach does not allow a bidirectional swap,
		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
		 */
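		/*
		 * For each (from, to) pair: skip the pair entirely if
		 * 'to' already appears in the set; otherwise replace
		 * the first occurrence of 'from', unless 'to' is
		 * marked out (weight 0).
		 */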
		for (i = 0; i < pg->pg_upmap_items.len; i++) {
			int from = pg->pg_upmap_items.from_to[i][0];
			int to = pg->pg_upmap_items.from_to[i][1];
			int pos = -1;
			bool exists = false;

			/* make sure replacement doesn't already appear */
			for (j = 0; j < raw->size; j++) {
				int osd = raw->osds[j];

				if (osd == to) {
					exists = true;
					break;
				}

				/* ignore mapping if target is marked out */
				if (osd == from && pos < 0 &&
				    !(to != CRUSH_ITEM_NONE &&
				      to < osdmap->max_osd &&
				      osdmap->osd_weight[to] == 0)) {
					pos = j;
				}
			}

			if (!exists && pos >= 0)
				raw->osds[pos] = to;
		}
	}
}

/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}

static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
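	/*
	 * An osd with primary-affinity aff keeps roughly
	 * aff / CEPH_OSD_MAX_PRIMARY_AFFINITY of the PGs it would
	 * otherwise lead: crush_hash32_2() >> 16 is spread over
	 * [0, 0xffff] and the osd is passed over when that value is
	 * >= aff.
	 */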
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}

/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}

/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}
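
/*
 * Map a PG to its primary shard.  For replicated pools the shard is
 * always CEPH_SPG_NOSHARD; for erasure-coded pools it is the position
 * of the acting primary within the acting set.
 */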
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}

/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
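
/*
 * Both strings are stored inline in cl_data, back to back, hence the
 * +2 for the two NUL terminators.
 */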
static struct crush_loc_node *alloc_crush_loc(size_t type_name_len,
					      size_t name_len)
{
	struct crush_loc_node *loc;

	loc = kmalloc(sizeof(*loc) + type_name_len + name_len + 2, GFP_NOIO);
	if (!loc)
		return NULL;

	RB_CLEAR_NODE(&loc->cl_node);
	return loc;
}

static void free_crush_loc(struct crush_loc_node *loc)
{
	WARN_ON(!RB_EMPTY_NODE(&loc->cl_node));

	kfree(loc);
}

static int crush_loc_compare(const struct crush_loc *loc1,
			     const struct crush_loc *loc2)
{
	return strcmp(loc1->cl_type_name, loc2->cl_type_name) ?:
	       strcmp(loc1->cl_name, loc2->cl_name);
}

DEFINE_RB_FUNCS2(crush_loc, struct crush_loc_node, cl_loc, crush_loc_compare,
		 RB_BYPTR, const struct crush_loc *, cl_node)

/*
 * Parses a set of <bucket type name>':'<bucket name> pairs separated
 * by '|', e.g. "rack:foo1|rack:foo2|datacenter:bar".
 *
 * Note that @crush_location is modified by strsep().
 */
int ceph_parse_crush_location(char *crush_location, struct rb_root *locs)
{
	struct crush_loc_node *loc;
	const char *type_name, *name, *colon;
	size_t type_name_len, name_len;

	dout("%s '%s'\n", __func__, crush_location);
	while ((type_name = strsep(&crush_location, "|"))) {
		colon = strchr(type_name, ':');
		if (!colon)
			return -EINVAL;

		type_name_len = colon - type_name;
		if (type_name_len == 0)
			return -EINVAL;

		name = colon + 1;
		name_len = strlen(name);
		if (name_len == 0)
			return -EINVAL;

		loc = alloc_crush_loc(type_name_len, name_len);
		if (!loc)
			return -ENOMEM;

		loc->cl_loc.cl_type_name = loc->cl_data;
		memcpy(loc->cl_loc.cl_type_name, type_name, type_name_len);
		loc->cl_loc.cl_type_name[type_name_len] = '\0';

		loc->cl_loc.cl_name = loc->cl_data + type_name_len + 1;
		memcpy(loc->cl_loc.cl_name, name, name_len);
		loc->cl_loc.cl_name[name_len] = '\0';

		if (!__insert_crush_loc(locs, loc)) {
			free_crush_loc(loc);
			return -EEXIST;
		}

		dout("%s type_name '%s' name '%s'\n", __func__,
		     loc->cl_loc.cl_type_name, loc->cl_loc.cl_name);
	}

	return 0;
}

int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2)
{
	struct rb_node *n1 = rb_first(locs1);
	struct rb_node *n2 = rb_first(locs2);
	int ret;

	for ( ; n1 && n2; n1 = rb_next(n1), n2 = rb_next(n2)) {
		struct crush_loc_node *loc1 =
		    rb_entry(n1, struct crush_loc_node, cl_node);
		struct crush_loc_node *loc2 =
		    rb_entry(n2, struct crush_loc_node, cl_node);

		ret = crush_loc_compare(&loc1->cl_loc, &loc2->cl_loc);
		if (ret)
			return ret;
	}
	if (!n1 && n2)
		return -1;
	if (n1 && !n2)
		return 1;
	return 0;
}

void ceph_clear_crush_locs(struct rb_root *locs)
{
	while (!RB_EMPTY_ROOT(locs)) {
		struct crush_loc_node *loc =
		    rb_entry(rb_first(locs), struct crush_loc_node, cl_node);

		erase_crush_loc(locs, loc);
		free_crush_loc(loc);
	}
}

/*
 * [a-zA-Z0-9-_.]+
 */
static bool is_valid_crush_name(const char *name)
{
	do {
		if (!('a' <= *name && *name <= 'z') &&
		    !('A' <= *name && *name <= 'Z') &&
		    !('0' <= *name && *name <= '9') &&
		    *name != '-' && *name != '_' && *name != '.')
			return false;
	} while (*++name != '\0');

	return true;
}

/*
 * Gets the parent of an item.  Returns its id (<0 because the
 * parent is always a bucket), type id (>0 for the same reason,
 * via @parent_type_id) and location (via @parent_loc).  If no
 * parent, returns 0.
 *
 * Does a linear search, as there are no parent pointers of any
 * kind.  Note that the result is ambiguous for items that occur
 * multiple times in the map.
 */
static int get_immediate_parent(struct crush_map *c, int id,
				u16 *parent_type_id,
				struct crush_loc *parent_loc)
{
	struct crush_bucket *b;
	struct crush_name_node *type_cn, *cn;
	int i, j;

	for (i = 0; i < c->max_buckets; i++) {
		b = c->buckets[i];
		if (!b)
			continue;

		/* ignore per-class shadow hierarchy */
		cn = lookup_crush_name(&c->names, b->id);
		if (!cn || !is_valid_crush_name(cn->cn_name))
			continue;

		for (j = 0; j < b->size; j++) {
			if (b->items[j] != id)
				continue;

			*parent_type_id = b->type;
			type_cn = lookup_crush_name(&c->type_names, b->type);
			parent_loc->cl_type_name = type_cn->cn_name;
			parent_loc->cl_name = cn->cn_name;
			return b->id;
		}
	}

	return 0;  /* no parent */
}

/*
 * Calculates the locality/distance from an item to a client
 * location expressed in terms of CRUSH hierarchy as a set of
 * (bucket type name, bucket name) pairs.  Specifically, looks
 * for the lowest-valued bucket type for which the location of
 * @id matches one of the locations in @locs, so for standard
 * bucket types (host = 1, rack = 3, datacenter = 8, zone = 9)
 * a matching host is closer than a matching rack and a matching
 * data center is closer than a matching zone.
 *
 * Specifying multiple locations (a "multipath" location) such
 * as "rack=foo1 rack=foo2 datacenter=bar" is allowed -- @locs
 * is a multimap.  The locality will be:
 *
 * - 3 for OSDs in racks foo1 and foo2
 * - 8 for OSDs in data center bar
 * - -1 for all other OSDs
 *
 * The lowest possible bucket type is 1, so the best locality
 * for an OSD is 1 (i.e. a matching host).  Locality 0 would be
 * the OSD itself.
 */
int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
			    struct rb_root *locs)
{
	struct crush_loc loc;
	u16 type_id;

	/*
	 * Instead of repeated get_immediate_parent() calls,
	 * the location of @id could be obtained with a single
	 * depth-first traversal.
	 */
	for (;;) {
		id = get_immediate_parent(osdmap->crush, id, &type_id, &loc);
		if (id >= 0)
			return -1; /* not local */

		if (lookup_crush_loc(locs, &loc))
			return type_id;
	}
}