// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <command.h>
#include <config.h>
#include <dm.h>
#include <hang.h>
#include <i2c.h>
#include <ram.h>
#include <time.h>
#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/io.h>
#include <mach/octeon_ddr.h>

#define CONFIG_REF_HERTZ	50000000

DECLARE_GLOBAL_DATA_PTR;

/* Return 1 if the integer is negative, else 0 (used as a sign bit) */
static s64 _sign(s64 v)
{
	return (v < 0);
}
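
/*
 * Example (illustrative): together with abs(), _sign() builds the 4-bit
 * sign-magnitude encoding used for the COMP_CTL2 ptune/ntune offsets
 * further below, e.g. an offset of -3 encodes as
 * (abs(-3) & 0x7) | (_sign(-3) << 3) = 0xb, while +3 encodes as 0x3.
 */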

#ifndef DDR_NO_DEBUG
char *lookup_env(struct ddr_priv *priv, const char *format, ...)
{
	char *s;
	unsigned long value;
	va_list args;
	char buffer[64];

	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	s = ddr_getenv_debug(priv, buffer);
	if (s) {
		value = simple_strtoul(s, NULL, 0);
		printf("Parameter found in environment %s=\"%s\" 0x%lx (%ld)\n",
		       buffer, s, value, value);
	}

	return s;
}

char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
{
	char *s;
	u64 value;
	va_list args;
	char buffer[64];

	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	s = ddr_getenv_debug(priv, buffer);
	if (s) {
		value = simple_strtoull(s, NULL, 0);
		printf("Parameter found in environment. %s = 0x%016llx\n",
		       buffer, value);
	}

	return s;
}
#else
char *lookup_env(struct ddr_priv *priv, const char *format, ...)
{
	return NULL;
}

char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
{
	return NULL;
}
#endif
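
/*
 * Usage sketch (illustrative): lookup_env() printf-formats an environment
 * variable name and returns its value string if set, so the tuning knobs
 * referenced below (e.g. "ddr_pll_clkf" or "ddr0_set_dclk_invert") can
 * override the computed defaults at run time:
 *
 *	s = lookup_env(priv, "ddr_pll_clkf");
 *	if (s)
 *		best_clkf = simple_strtoul(s, NULL, 0);
 */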

/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
#define CVMX_L2C_TADS  ((OCTEON_IS_MODEL(OCTEON_CN68XX) ||		\
			 OCTEON_IS_MODEL(OCTEON_CN73XX) ||		\
			 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 4 :		\
			(OCTEON_IS_MODEL(OCTEON_CN78XX)) ? 8 : 1)

/* Number of L2C IOBs connected to LMC. */
#define CVMX_L2C_IOBS  ((OCTEON_IS_MODEL(OCTEON_CN68XX) ||		\
			 OCTEON_IS_MODEL(OCTEON_CN78XX) ||		\
			 OCTEON_IS_MODEL(OCTEON_CN73XX) ||		\
			 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 2 : 1)

#define CVMX_L2C_MAX_MEMSZ_ALLOWED (OCTEON_IS_OCTEON2() ?		\
				    (32 * CVMX_L2C_TADS) :		\
				    (OCTEON_IS_MODEL(OCTEON_CN70XX) ?	\
				     512 : (OCTEON_IS_OCTEON3() ? 1024 : 0)))
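
/*
 * Worked example (illustrative): CVMX_L2C_MAX_MEMSZ_ALLOWED is in GB.
 * On CN68XX (Octeon 2, 4 TADs) it evaluates to 32 * 4 = 128 GB; on
 * CN78XX (Octeon 3) it is 1024 GB.  cvmx_l2c_set_big_size() below compares
 * against this limit after scaling mem_size (which is in MB) by 1024.
 */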

/**
 * Initialize the BIG address in L2C+DRAM to generate proper error
 * on reading/writing to a non-existent memory location.
 *
 * @param priv		Pointer to the driver's private data
 * @param mem_size	Amount of DRAM configured in MB
 * @param mode		Allow/Disallow reporting errors L2C_INT_SUM[BIGRD,BIGWR]
 */
static void cvmx_l2c_set_big_size(struct ddr_priv *priv, u64 mem_size, int mode)
{
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) &&
	    !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
		union cvmx_l2c_big_ctl big_ctl;
		int bits = 0, zero_bits = 0;
		u64 mem;

		if (mem_size > (CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024ull)) {
			printf("WARNING: Invalid memory size(%lld) requested, should be <= %lld\n",
			       mem_size,
			       (u64)CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024);
			mem_size = CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024;
		}

		mem = mem_size;
		while (mem) {
			if ((mem & 1) == 0)
				zero_bits++;
			bits++;
			mem >>= 1;
		}

		if ((bits - zero_bits) != 1 || (bits - 9) <= 0) {
			printf("ERROR: Invalid DRAM size (%lld) requested, refer to L2C_BIG_CTL[maxdram] for valid options.\n",
			       mem_size);
			return;
		}

		/*
		 * The BIG/HOLE logic is not supported in pass1 as per
		 * Errata L2C-17736
		 */
		if (mode == 0 && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			mode = 1;

		big_ctl.u64 = 0;
		big_ctl.s.maxdram = bits - 9;
		big_ctl.cn61xx.disable = mode;
		l2c_wr(priv, CVMX_L2C_BIG_CTL_REL, big_ctl.u64);
	}
}
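
/*
 * Worked example (illustrative): the bit scan above requires mem_size to be
 * a power of two in MB.  For mem_size = 16384 MB (16 GB = 2^14 MB) it sees
 * bits = 15 and zero_bits = 14, so exactly one bit is set, and
 * L2C_BIG_CTL[MAXDRAM] is programmed to bits - 9 = 6.
 */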

static u32 octeon3_refclock(u32 alt_refclk, u32 ddr_hertz,
			    struct dimm_config *dimm_config)
{
	u32 ddr_ref_hertz = CONFIG_REF_HERTZ;
	int ddr_type;
	int spd_dimm_type;

	debug("%s(%u, %u, %p)\n", __func__, alt_refclk, ddr_hertz, dimm_config);

	/* Octeon 3 case... */

	/*
	 * We know whether the alternate refclk is always wanted, and we also
	 * already know if we want 2133 MT/s.  If the alternate refclk is not
	 * always wanted, probe the DDR and DIMM type; if DDR4 and RDIMMs,
	 * set the desired refclk to 100 MHz, otherwise leave the default
	 * (50 MHz).  Depend on ddr_initialize() to do the refclk selection
	 * and validation.
	 */
	if (alt_refclk) {
		/*
		 * If alternate refclk was specified, let it override
		 * everything
		 */
		ddr_ref_hertz = alt_refclk * 1000000;
		printf("%s: DRAM init: %d MHz refclk is REQUESTED ALWAYS\n",
		       __func__, alt_refclk);
	} else if (ddr_hertz > 1000000000) {
		ddr_type = get_ddr_type(dimm_config, 0);
		spd_dimm_type = get_dimm_module_type(dimm_config, 0, ddr_type);

		debug("ddr type: 0x%x, dimm type: 0x%x\n", ddr_type,
		      spd_dimm_type);
		/* Check for DDR4 and RDIMM just to be sure. */
		if (ddr_type == DDR4_DRAM &&
		    (spd_dimm_type == 1 || spd_dimm_type == 5 ||
		     spd_dimm_type == 8)) {
			/* Yes, we require a 100 MHz refclk, so set it. */
			ddr_ref_hertz = 100000000;
			puts("DRAM init: 100 MHz refclk is REQUIRED\n");
		}
	}

	debug("%s: speed: %u\n", __func__, ddr_ref_hertz);
	return ddr_ref_hertz;
}

int encode_row_lsb_ddr3(int row_lsb)
{
	int row_lsb_start = 14;

	/*
	 * Decoding for row_lsb:
	 * 000: row_lsb = mem_adr[14]
	 * 001: row_lsb = mem_adr[15]
	 * 010: row_lsb = mem_adr[16]
	 * 011: row_lsb = mem_adr[17]
	 * 100: row_lsb = mem_adr[18]
	 * 101: row_lsb = mem_adr[19]
	 * 110: row_lsb = mem_adr[20]
	 * 111: RESERVED
	 */
	if (octeon_is_cpuid(OCTEON_CN6XXX) ||
	    octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
		row_lsb_start = 14;
	else
		printf("ERROR: Unsupported Octeon model: 0x%x\n",
		       read_c0_prid());

	return row_lsb - row_lsb_start;
}
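
/*
 * Example (illustrative): for row_lsb = 16 on a supported model the function
 * returns 16 - 14 = 2, i.e. the "010: row_lsb = mem_adr[16]" encoding from
 * the table above.
 */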

int encode_pbank_lsb_ddr3(int pbank_lsb)
{
	/*
	 * Decoding for pbank_lsb:
	 * 0000: DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA)
	 * 0001: DIMM = mem_adr[29] / rank = mem_adr[28]      "
	 * 0010: DIMM = mem_adr[30] / rank = mem_adr[29]      "
	 * 0011: DIMM = mem_adr[31] / rank = mem_adr[30]      "
	 * 0100: DIMM = mem_adr[32] / rank = mem_adr[31]      "
	 * 0101: DIMM = mem_adr[33] / rank = mem_adr[32]      "
	 * 0110: DIMM = mem_adr[34] / rank = mem_adr[33]      "
	 * 0111: DIMM = 0           / rank = mem_adr[34]      "
	 * 1000-1111: RESERVED
	 */
	int pbank_lsb_start = 0;

	if (octeon_is_cpuid(OCTEON_CN6XXX) ||
	    octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
		pbank_lsb_start = 28;
	else
		printf("ERROR: Unsupported Octeon model: 0x%x\n",
		       read_c0_prid());

	return pbank_lsb - pbank_lsb_start;
}
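
/*
 * Example (illustrative): pbank_lsb = 30 returns 30 - 28 = 2, selecting the
 * "0010: DIMM = mem_adr[30] / rank = mem_adr[29]" encoding above.
 */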

static void set_ddr_clock_initialized(struct ddr_priv *priv, int if_num,
				      bool inited_flag)
{
	priv->ddr_clock_initialized[if_num] = inited_flag;
}

static int ddr_clock_initialized(struct ddr_priv *priv, int if_num)
{
	return priv->ddr_clock_initialized[if_num];
}

static void set_ddr_memory_preserved(struct ddr_priv *priv)
{
	priv->ddr_memory_preserved = true;
}

bool ddr_memory_preserved(struct ddr_priv *priv)
{
	return priv->ddr_memory_preserved;
}

static void cn78xx_lmc_dreset_init(struct ddr_priv *priv, int if_num)
{
	union cvmx_lmcx_dll_ctl2 dll_ctl2;

	/*
	 * The remainder of this section describes the sequence for LMCn.
	 *
	 * 1. If not done already, write LMC(0..3)_DLL_CTL2 to its reset value
	 * (except without changing the LMC(0..3)_DLL_CTL2[INTF_EN] value from
	 * that set in the prior Step 3), including
	 * LMC(0..3)_DLL_CTL2[DRESET] = 1.
	 *
	 * 2. Without changing any other LMC(0..3)_DLL_CTL2 fields, write
	 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 1.
	 */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.dll_bringup = 1;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);

	/*
	 * 3. Read LMC(0..3)_DLL_CTL2 and wait for the result.
	 */
	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));

	/*
	 * 4. Wait for a minimum of 10 LMC CK cycles.
	 */
	udelay(1);

	/*
	 * 5. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
	 * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] = 1.
	 * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] must not change after this point
	 * without restarting the LMCn DRESET initialization sequence.
	 */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.quad_dll_ena = 1;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);

	/*
	 * 6. Read LMC(0..3)_DLL_CTL2 and wait for the result.
	 */
	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));

	/*
	 * 7. Wait a minimum of 10 us.
	 */
	udelay(10);

	/*
	 * 8. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
	 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 0.
	 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] must not change after this point
	 * without restarting the LMCn DRESET initialization sequence.
	 */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.dll_bringup = 0;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);

	/*
	 * 9. Read LMC(0..3)_DLL_CTL2 and wait for the result.
	 */
	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));

	/*
	 * 10. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
	 * LMC(0..3)_DLL_CTL2[DRESET] = 0.
	 * LMC(0..3)_DLL_CTL2[DRESET] must not change after this point without
	 * restarting the LMCn DRESET initialization sequence.
	 *
	 * After completing LMCn DRESET initialization, all LMC CSRs may be
	 * accessed.  Prior to completing LMC DRESET initialization, only
	 * LMC(0..3)_DDR_PLL_CTL, LMC(0..3)_DLL_CTL2, LMC(0..3)_RESET_CTL, and
	 * LMC(0..3)_COMP_CTL2 LMC CSRs can be accessed.
	 */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.dreset = 0;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
}

int initialize_ddr_clock(struct ddr_priv *priv, struct ddr_conf *ddr_conf,
			 u32 cpu_hertz, u32 ddr_hertz, u32 ddr_ref_hertz,
			 int if_num, u32 if_mask)
{
	char *s;

	if (ddr_clock_initialized(priv, if_num))
		return 0;

	if (!ddr_clock_initialized(priv, 0)) {	/* Do this once */
		union cvmx_lmcx_reset_ctl reset_ctl;
		int i;

		/*
		 * Check to see if memory is to be preserved and set global
		 * flag
		 */
		for (i = 3; i >= 0; --i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
			if (reset_ctl.s.ddr3psv == 1) {
				debug("LMC%d Preserving memory\n", i);
				set_ddr_memory_preserved(priv);

				/* Re-initialize flags */
				reset_ctl.s.ddr3pwarm = 0;
				reset_ctl.s.ddr3psoft = 0;
				reset_ctl.s.ddr3psv = 0;
				lmc_wr(priv, CVMX_LMCX_RESET_CTL(i),
				       reset_ctl.u64);
			}
		}
	}

	/*
	 * ToDo: Add support for these SoCs:
	 *
	 * if (octeon_is_cpuid(OCTEON_CN63XX) ||
	 * octeon_is_cpuid(OCTEON_CN66XX) ||
	 * octeon_is_cpuid(OCTEON_CN61XX) || octeon_is_cpuid(OCTEON_CNF71XX))
	 *
	 * and
	 *
	 * if (octeon_is_cpuid(OCTEON_CN68XX))
	 *
	 * and
	 *
	 * if (octeon_is_cpuid(OCTEON_CN70XX))
	 */
	if (octeon_is_cpuid(OCTEON_CN78XX) || octeon_is_cpuid(OCTEON_CN73XX) ||
	    octeon_is_cpuid(OCTEON_CNF75XX)) {
		union cvmx_lmcx_dll_ctl2 dll_ctl2;
		union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
		union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;
		struct dimm_config *dimm_config_table =
			ddr_conf->dimm_config_table;
		int en_idx, save_en_idx, best_en_idx = 0;
		u64 clkf, clkr, max_clkf = 127;
		u64 best_clkf = 0, best_clkr = 0;
		u64 best_pll_MHz = 0;
		u64 pll_MHz;
		u64 min_pll_MHz = 800;
		u64 max_pll_MHz = 5000;
		u64 error;
		u64 best_error;
		u64 best_calculated_ddr_hertz = 0;
		u64 calculated_ddr_hertz = 0;
		u64 orig_ddr_hertz = ddr_hertz;
		const int _en[] = { 1, 2, 3, 4, 5, 6, 7, 8, 10, 12 };
		int override_pll_settings;
		int new_bwadj;
		int ddr_type;
		int i;

		/* ddr_type only indicates DDR4 or DDR3 */
		ddr_type = (read_spd(&dimm_config_table[0], 0,
				     DDR4_SPD_KEY_BYTE_DEVICE_TYPE) ==
			    0x0C) ? DDR4_DRAM : DDR3_DRAM;
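
		/*
		 * Note (illustrative): the SPD key byte (DRAM device type)
		 * reads 0x0C for DDR4 SDRAM and 0x0B for DDR3 SDRAM, which
		 * is why a single compare against 0x0C suffices here.
		 */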

		/*
		 * 5.9 LMC Initialization Sequence
		 *
		 * There are 13 parts to the LMC initialization procedure:
		 *
		 * 1. DDR PLL initialization
		 *
		 * 2. LMC CK initialization
		 *
		 * 3. LMC interface enable initialization
		 *
		 * 4. LMC DRESET initialization
		 *
		 * 5. LMC CK local initialization
		 *
		 * 6. LMC RESET initialization
		 *
		 * 7. Early LMC initialization
		 *
		 * 8. LMC offset training
		 *
		 * 9. LMC internal Vref training
		 *
		 * 10. LMC deskew training
		 *
		 * 11. LMC write leveling
		 *
		 * 12. LMC read leveling
		 *
		 * 13. Final LMC initialization
		 *
		 * CN78XX supports two modes:
		 *
		 * - two-LMC mode: both LMCs 2/3 must not be enabled
		 * (LMC2/3_DLL_CTL2[DRESET] must be set to 1 and
		 * LMC2/3_DLL_CTL2[INTF_EN] must be set to 0) and both
		 * LMCs 0/1 must be enabled.
		 *
		 * - four-LMC mode: all four LMCs 0..3 must be enabled.
		 *
		 * Steps 4 and 6..13 should each be performed for each
		 * enabled LMC (either twice or four times). Steps 1..3 and
		 * 5 are more global in nature and each must be executed
		 * exactly once (not once per LMC) each time the DDR PLL
		 * changes or is first brought up. Steps 1..3 and 5 need
		 * not be performed if the DDR PLL is stable.
		 *
		 * Generally, the steps are performed in order. The exception
		 * is that the CK local initialization (step 5) must be
		 * performed after some DRESET initializations (step 4) and
		 * before other DRESET initializations when the DDR PLL is
		 * brought up or changed. (The CK local initialization uses
		 * information from some LMCs to bring up the other local
		 * CKs.) The following text describes these ordering
		 * requirements in more detail.
		 *
		 * Following any chip reset, the DDR PLL must be brought up,
		 * and all 13 steps should be executed. Subsequently, it is
		 * possible to execute only steps 4 and 6..13, or to execute
		 * only steps 8..13.
		 *
		 * The remainder of this section covers these initialization
		 * steps in sequence.
		 */

		/* Do the following init only once */
		if (if_num != 0)
			goto not_if0;

		/* Only for interface #0 ... */

		/*
		 * 5.9.3 LMC Interface-Enable Initialization
		 *
		 * LMC interface-enable initialization (Step 3) must be
		 * performed after Step 2 for each chip reset and whenever
		 * the DDR clock speed changes. This step needs to be
		 * performed only once, not once per LMC. Perform the
		 * following three substeps for the LMC interface-enable
		 * initialization:
		 *
		 * 1. Without changing any other LMC2_DLL_CTL2 fields
		 * (LMC(0..3)_DLL_CTL2 should be at their reset values after
		 * Step 1), write LMC2_DLL_CTL2[INTF_EN] = 1 if four-LMC
		 * mode is desired.
		 *
		 * 2. Without changing any other LMC3_DLL_CTL2 fields, write
		 * LMC3_DLL_CTL2[INTF_EN] = 1 if four-LMC mode is desired.
		 *
		 * 3. Read LMC2_DLL_CTL2 and wait for the result.
		 *
		 * The LMC2_DLL_CTL2[INTF_EN] and LMC3_DLL_CTL2[INTF_EN]
		 * values should not be changed by software from this point.
		 */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
			dll_ctl2.cn78xx.byp_setting = 0;
			dll_ctl2.cn78xx.byp_sel = 0;
			dll_ctl2.cn78xx.quad_dll_ena = 0;
			dll_ctl2.cn78xx.dreset = 1;
			dll_ctl2.cn78xx.dll_bringup = 0;
			dll_ctl2.cn78xx.intf_en = 0;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
		}

		/*
		 * ###### Interface enable (intf_en) deferred until after
		 * DDR_DIV_RESET=0 #######
		 */

		/*
		 * 5.9.1 DDR PLL Initialization
		 *
		 * DDR PLL initialization (Step 1) must be performed for each
		 * chip reset and whenever the DDR clock speed changes. This
		 * step needs to be performed only once, not once per LMC.
		 *
		 * Perform the following eight substeps to initialize the
		 * DDR PLL:
		 *
		 * 1. If not done already, write all fields in
		 * LMC(0..3)_DDR_PLL_CTL and
		 * LMC(0..1)_DLL_CTL2 to their reset values, including:
		 *
		 * .. LMC0_DDR_PLL_CTL[DDR_DIV_RESET] = 1
		 * .. LMC0_DLL_CTL2[DRESET] = 1
		 *
		 * This substep is not necessary after a chip reset.
		 */
		ddr_pll_ctl.u64 = lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));

		ddr_pll_ctl.cn78xx.reset_n = 0;
		ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
		ddr_pll_ctl.cn78xx.phy_dcok = 0;

		/*
		 * 73XX pass 1.3 has LMC0 DCLK_INVERT tied to 1; earlier
		 * 73xx passes are tied to 0
		 *
		 * 75XX needs LMC0 DCLK_INVERT set to 1 to minimize duty
		 * cycle falling points
		 *
		 * and we default all other chips LMC0 to DCLK_INVERT=0
		 */
		ddr_pll_ctl.cn78xx.dclk_invert =
			!!(octeon_is_cpuid(OCTEON_CN73XX_PASS1_3) ||
			   octeon_is_cpuid(OCTEON_CNF75XX));

		/*
		 * Allow override of the LMC0 desired setting for DCLK_INVERT,
		 * but not on 73XX; we cannot change LMC0 DCLK_INVERT on any
		 * 73XX pass.
		 */
		if (!(octeon_is_cpuid(OCTEON_CN73XX))) {
			s = lookup_env(priv, "ddr0_set_dclk_invert");
			if (s) {
				ddr_pll_ctl.cn78xx.dclk_invert =
					!!simple_strtoul(s, NULL, 0);
				debug("LMC0: override DDR_PLL_CTL[dclk_invert] to %d\n",
				      ddr_pll_ctl.cn78xx.dclk_invert);
			}
		}

		lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0), ddr_pll_ctl.u64);
		debug("%-45s : 0x%016llx\n", "LMC0: DDR_PLL_CTL",
		      ddr_pll_ctl.u64);

		/* Only when LMC1 is active */
		if (if_mask & 0x2) {
			/*
			 * For CNF75XX, both LMC0 and LMC1 use the same PLL,
			 * so we use the LMC0 setting of DCLK_INVERT for LMC1.
			 */
			if (!octeon_is_cpuid(OCTEON_CNF75XX)) {
				int override = 0;

				/*
				 * By default, for non-CNF75XX, we want
				 * LMC1 toggled from LMC0
				 */
				int lmc0_dclk_invert =
					ddr_pll_ctl.cn78xx.dclk_invert;

				/*
				 * FIXME: the work-around for DDR3 UDIMM
				 * problems is to use the LMC0 setting on LMC1
				 * and, on 73xx pass 1.3, to default LMC1
				 * DCLK_INVERT to LMC0, not the invert of LMC0
				 */
				int lmc1_dclk_invert;

				lmc1_dclk_invert =
					((ddr_type == DDR4_DRAM) &&
					 !octeon_is_cpuid(OCTEON_CN73XX_PASS1_3))
					? lmc0_dclk_invert ^ 1 :
					lmc0_dclk_invert;

				/*
				 * Allow override of the LMC1 desired setting
				 * for DCLK_INVERT
				 */
				s = lookup_env(priv, "ddr1_set_dclk_invert");
				if (s) {
					lmc1_dclk_invert =
						!!simple_strtoul(s, NULL, 0);
					override = 1;
				}
				debug("LMC1: %s DDR_PLL_CTL[dclk_invert] to %d (LMC0 %d)\n",
				      (override) ? "override" : "default",
				      lmc1_dclk_invert, lmc0_dclk_invert);

				ddr_pll_ctl.cn78xx.dclk_invert =
					lmc1_dclk_invert;
			}

			/* But always write the LMC1 CSR if it is active */
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(1), ddr_pll_ctl.u64);
			debug("%-45s : 0x%016llx\n",
			      "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u64);
		}

		/*
		 * 2. If the current DRAM contents are not preserved (see
		 * LMC(0..3)_RESET_CTL[DDR3PSV]), this is also an appropriate
		 * time to assert the RESET# pin of the DDR3/DDR4 DRAM parts.
		 * If desired, write
		 * LMC0_RESET_CTL[DDR3RST] = 0 without modifying any other
		 * LMC0_RESET_CTL fields to assert the DDR_RESET_L pin.
		 * No action is required here to assert DDR_RESET_L
		 * following a chip reset. Refer to Section 5.9.6. Do this
		 * for all enabled LMCs.
		 */
		for (i = 0; (!ddr_memory_preserved(priv)) && i < 4; ++i) {
			union cvmx_lmcx_reset_ctl reset_ctl;

			if ((if_mask & (1 << i)) == 0)
				continue;

			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
			reset_ctl.cn78xx.ddr3rst = 0;	/* Reset asserted */
			debug("LMC%d Asserting DDR_RESET_L\n", i);
			lmc_wr(priv, CVMX_LMCX_RESET_CTL(i), reset_ctl.u64);
			lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
		}

		/*
		 * 3. Without changing any other LMC0_DDR_PLL_CTL values,
		 * write LMC0_DDR_PLL_CTL[CLKF] with a value that gives a
		 * desired DDR PLL speed. The LMC0_DDR_PLL_CTL[CLKF] value
		 * should be selected in conjunction with the post-scalar
		 * divider values for LMC (LMC0_DDR_PLL_CTL[DDR_PS_EN]) so
		 * that the desired LMC CK speed is produced (all
		 * enabled LMCs must run the same speed). Section 5.14
		 * describes LMC0_DDR_PLL_CTL[CLKF] and
		 * LMC0_DDR_PLL_CTL[DDR_PS_EN] programmings that produce
		 * the desired LMC CK speed. Section 5.9.2 describes LMC CK
		 * initialization, which can be done separately from the DDR
		 * PLL initialization described in this section.
		 *
		 * The LMC0_DDR_PLL_CTL[CLKF] value must not change after
		 * this point without restarting this SDRAM PLL
		 * initialization sequence.
		 */
		/* Init to max error */
		error = ddr_hertz;
		best_error = ddr_hertz;

		debug("DDR Reference Hertz = %d\n", ddr_ref_hertz);
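
		/*
		 * Search sketch (illustrative): the loop below exhaustively
		 * tries CLKR, the post-scalar divider table _en[] and the
		 * derived CLKF, where
		 *
		 *   pll_MHz   = ddr_ref_hertz * (clkf + 1) / (clkr + 1)
		 *   ddr_hertz = pll_MHz / _en[en_idx]
		 *
		 * Example with the default 50 MHz refclk and an 800 MHz
		 * target: clkr = 0 and _en = 2 give
		 * clkf = round(800e6 * 1 * 2 / 50e6) - 1 = 31, so the PLL
		 * runs at 50 * 32 = 1600 MHz and DDR at 1600 / 2 = 800 MHz.
		 */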

		while (best_error == ddr_hertz) {
			for (clkr = 0; clkr < 4; ++clkr) {
				for (en_idx =
				     sizeof(_en) / sizeof(int) - 1;
				     en_idx >= 0; --en_idx) {
					save_en_idx = en_idx;
					clkf =
						((ddr_hertz) *
						 (clkr + 1) * (_en[save_en_idx]));
					clkf = divide_nint(clkf, ddr_ref_hertz)
						- 1;
					pll_MHz =
						ddr_ref_hertz *
						(clkf + 1) / (clkr + 1) / 1000000;
					calculated_ddr_hertz =
						ddr_ref_hertz *
						(clkf + 1) /
						((clkr + 1) * (_en[save_en_idx]));
					error =
						ddr_hertz - calculated_ddr_hertz;

					if (pll_MHz < min_pll_MHz ||
					    pll_MHz > max_pll_MHz)
						continue;
					if (clkf > max_clkf) {
						/*
						 * PLL requires clkf to be
						 * limited
						 */
						continue;
					}
					if (abs(error) > abs(best_error))
						continue;

					debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld\n",
					      clkr, save_en_idx,
					      _en[save_en_idx], clkf, pll_MHz,
					      calculated_ddr_hertz, error);

					/* Favor the highest PLL frequency. */
					if (abs(error) < abs(best_error) ||
					    pll_MHz > best_pll_MHz) {
						best_pll_MHz = pll_MHz;
						best_calculated_ddr_hertz =
							calculated_ddr_hertz;
						best_error = error;
						best_clkr = clkr;
						best_clkf = clkf;
						best_en_idx = save_en_idx;
					}
				}
			}

			override_pll_settings = 0;

			s = lookup_env(priv, "ddr_pll_clkr");
			if (s) {
				best_clkr = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			s = lookup_env(priv, "ddr_pll_clkf");
			if (s) {
				best_clkf = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			s = lookup_env(priv, "ddr_pll_en_idx");
			if (s) {
				best_en_idx = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			if (override_pll_settings) {
				best_pll_MHz =
					ddr_ref_hertz * (best_clkf + 1) /
					(best_clkr + 1) / 1000000;
				best_calculated_ddr_hertz =
					ddr_ref_hertz * (best_clkf + 1) /
					((best_clkr + 1) * (_en[best_en_idx]));
				best_error =
					ddr_hertz - best_calculated_ddr_hertz;
			}

			debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld <==\n",
			      best_clkr, best_en_idx, _en[best_en_idx],
			      best_clkf, best_pll_MHz,
			      best_calculated_ddr_hertz, best_error);

			/*
			 * Try lowering the frequency if we can't get a
			 * working configuration
			 */
			if (best_error == ddr_hertz) {
				if (ddr_hertz < orig_ddr_hertz - 10000000)
					break;
				ddr_hertz -= 1000000;
				best_error = ddr_hertz;
			}
		}

		if (best_error == ddr_hertz) {
			printf("ERROR: Can not compute a legal DDR clock speed configuration.\n");
			return -1;
		}

		new_bwadj = (best_clkf + 1) / 10;
		debug("bwadj: %2d\n", new_bwadj);

		s = lookup_env(priv, "ddr_pll_bwadj");
		if (s) {
			new_bwadj = simple_strtoul(s, NULL, 0);
			debug("bwadj: %2d\n", new_bwadj);
		}

		for (i = 0; i < 2; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			ddr_pll_ctl.u64 =
				lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			debug("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
			      i, ddr_pll_ctl.u64);

			ddr_pll_ctl.cn78xx.ddr_ps_en = best_en_idx;
			ddr_pll_ctl.cn78xx.clkf = best_clkf;
			ddr_pll_ctl.cn78xx.clkr = best_clkr;
			ddr_pll_ctl.cn78xx.reset_n = 0;
			ddr_pll_ctl.cn78xx.bwadj = new_bwadj;

			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
			debug("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
			      i, ddr_pll_ctl.u64);

			/*
			 * For cnf75xx LMC0 and LMC1 use the same PLL so
			 * only program LMC0 PLL.
			 */
			if (octeon_is_cpuid(OCTEON_CNF75XX))
				break;
		}

		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			/*
			 * 4. Read LMC0_DDR_PLL_CTL and wait for the result.
			 */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 5. Wait a minimum of 3 us.
			 */
			udelay(3);	/* Wait 3 us */

			/*
			 * 6. Write LMC0_DDR_PLL_CTL[RESET_N] = 1 without
			 * changing any other LMC0_DDR_PLL_CTL values.
			 */
			ddr_pll_ctl.u64 =
				lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.reset_n = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 7. Read LMC0_DDR_PLL_CTL and wait for the result.
			 */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 8. Wait a minimum of 25 us.
			 */
			udelay(25);	/* Wait 25 us */

			/*
			 * For cnf75xx LMC0 and LMC1 use the same PLL so
			 * only program LMC0 PLL.
			 */
			if (octeon_is_cpuid(OCTEON_CNF75XX))
				break;
		}

		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			/*
			 * 5.9.2 LMC CK Initialization
			 *
			 * DDR PLL initialization must be completed prior to
			 * starting LMC CK initialization.
			 *
			 * Perform the following substeps to initialize the
			 * LMC CK:
			 *
			 * 1. Without changing any other LMC(0..3)_DDR_PLL_CTL
			 * values, write
			 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 1 and
			 * LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN] with the
			 * appropriate value to get the desired LMC CK speed.
			 * Section 5.14 discusses CLKF and DDR_PS_EN
			 * programmings. The LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN]
			 * must not change after this point without restarting
			 * this LMC CK initialization sequence.
			 */
			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 2. Without changing any other fields in
			 * LMC(0..3)_DDR_PLL_CTL, write
			 * LMC(0..3)_DDR_PLL_CTL[DDR4_MODE] = 0.
			 */
			ddr_pll_ctl.u64 =
				lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr4_mode =
				(ddr_type == DDR4_DRAM) ? 1 : 0;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 3. Read LMC(0..3)_DDR_PLL_CTL and wait for the
			 * result.
			 */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 4. Wait a minimum of 1 us.
			 */
			udelay(1);	/* Wait 1 us */

			/*
			 * ###### Steps 5 through 7 deferred until after
			 * DDR_DIV_RESET=0 #######
			 */

			/*
			 * 8. Without changing any other LMC(0..3)_COMP_CTL2
			 * values, write
			 * LMC(0..3)_COMP_CTL2[CK_CTL,CONTROL_CTL,CMD_CTL]
			 * to the desired DDR*_CK_*_P control and command
			 * signals drive strength.
			 */
			union cvmx_lmcx_comp_ctl2 comp_ctl2;
			const struct ddr3_custom_config *custom_lmc_config =
				&ddr_conf->custom_lmc_config;

			comp_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_COMP_CTL2(i));

			/* Default 4=34.3 ohm */
			comp_ctl2.cn78xx.dqx_ctl =
				(custom_lmc_config->dqx_ctl ==
				 0) ? 4 : custom_lmc_config->dqx_ctl;
			/* Default 4=34.3 ohm */
			comp_ctl2.cn78xx.ck_ctl =
				(custom_lmc_config->ck_ctl ==
				 0) ? 4 : custom_lmc_config->ck_ctl;
			/* Default 4=34.3 ohm */
			comp_ctl2.cn78xx.cmd_ctl =
				(custom_lmc_config->cmd_ctl ==
				 0) ? 4 : custom_lmc_config->cmd_ctl;

			comp_ctl2.cn78xx.rodt_ctl = 0x4;	/* 60 ohm */

			comp_ctl2.cn70xx.ptune_offset =
				(abs(custom_lmc_config->ptune_offset) & 0x7)
				| (_sign(custom_lmc_config->ptune_offset) << 3);
			comp_ctl2.cn70xx.ntune_offset =
				(abs(custom_lmc_config->ntune_offset) & 0x7)
				| (_sign(custom_lmc_config->ntune_offset) << 3);

			s = lookup_env(priv, "ddr_clk_ctl");
			if (s) {
				comp_ctl2.cn78xx.ck_ctl =
					simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ck_ctl");
			if (s) {
				comp_ctl2.cn78xx.ck_ctl =
					simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_cmd_ctl");
			if (s) {
				comp_ctl2.cn78xx.cmd_ctl =
					simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_dqx_ctl");
			if (s) {
				comp_ctl2.cn78xx.dqx_ctl =
					simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ptune_offset");
			if (s) {
				comp_ctl2.cn78xx.ptune_offset =
					simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ntune_offset");
			if (s) {
				comp_ctl2.cn78xx.ntune_offset =
					simple_strtoul(s, NULL, 0);
			}

			lmc_wr(priv, CVMX_LMCX_COMP_CTL2(i), comp_ctl2.u64);

			/*
			 * 9. Read LMC(0..3)_DDR_PLL_CTL and wait for the
			 * result.
			 */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 10. Wait a minimum of 200 ns.
			 */
			udelay(1);	/* Wait 1 us */

			/*
			 * 11. Without changing any other
			 * LMC(0..3)_DDR_PLL_CTL values, write
			 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 0.
			 */
			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr_div_reset = 0;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 12. Read LMC(0..3)_DDR_PLL_CTL and wait for the
			 * result.
			 */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 13. Wait a minimum of 200 ns.
			 */
			udelay(1);	/* Wait 1 us */
		}

		/*
		 * Relocated Interface Enable (intf_en) Step
		 */
		for (i = (octeon_is_cpuid(OCTEON_CN73XX) ||
			  octeon_is_cpuid(OCTEON_CNF75XX)) ? 1 : 2;
		     i < 4; ++i) {
			/*
			 * This step is only necessary for LMC 2 and 3 in
			 * 4-LMC mode. The mask will cause the unpopulated
			 * interfaces to be skipped.
			 */
			if ((if_mask & (1 << i)) == 0)
				continue;

			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
			dll_ctl2.cn78xx.intf_en = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
			lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
		}

		/*
		 * Relocated PHY_DCOK Step
		 */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			/*
			 * 5. Without changing any other fields in
			 * LMC(0..3)_DDR_PLL_CTL, write
			 * LMC(0..3)_DDR_PLL_CTL[PHY_DCOK] = 1.
			 */
			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.phy_dcok = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 6. Read LMC(0..3)_DDR_PLL_CTL and wait for
			 * the result.
			 */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 7. Wait a minimum of 20 us.
			 */
			udelay(20);	/* Wait 20 us */
		}

		/*
		 * 5.9.4 LMC DRESET Initialization
		 *
		 * All of the DDR PLL, LMC global CK, and LMC interface
		 * enable initializations must be completed prior to starting
		 * this LMC DRESET initialization (Step 4).
		 *
		 * This LMC DRESET step is done for all enabled LMCs.
		 *
		 * There are special constraints on the ordering of DRESET
		 * initialization (Step 4) and CK local initialization
		 * (Step 5) whenever CK local initialization must be executed.
		 * CK local initialization must be executed whenever the DDR
		 * PLL is being brought up (for each chip reset and whenever
		 * the DDR clock speed changes).
		 *
		 * When Step 5 must be executed in the two-LMC mode case:
		 * - LMC0 DRESET initialization must occur before Step 5.
		 * - LMC1 DRESET initialization must occur after Step 5.
		 *
		 * When Step 5 must be executed in the four-LMC mode case:
		 * - LMC2 and LMC3 DRESET initialization must occur before
		 *   Step 5.
		 * - LMC0 and LMC1 DRESET initialization must occur after
		 *   Step 5.
		 */
		if (octeon_is_cpuid(OCTEON_CN73XX)) {
			/* ONE-LMC or TWO-LMC MODE BEFORE STEP 5 for cn73xx */
			cn78xx_lmc_dreset_init(priv, 0);
		} else if (octeon_is_cpuid(OCTEON_CNF75XX)) {
			if (if_mask == 0x3) {
				/*
				 * 2-LMC Mode: LMC1 DRESET must occur
				 * before Step 5
				 */
				cn78xx_lmc_dreset_init(priv, 1);
			}
		} else {
			/* TWO-LMC MODE DRESET BEFORE STEP 5 */
			if (if_mask == 0x3)
				cn78xx_lmc_dreset_init(priv, 0);

			/* FOUR-LMC MODE BEFORE STEP 5 */
			if (if_mask == 0xf) {
				cn78xx_lmc_dreset_init(priv, 2);
				cn78xx_lmc_dreset_init(priv, 3);
			}
		}

		/*
		 * 5.9.5 LMC CK Local Initialization
		 *
		 * All of DDR PLL, LMC global CK, and LMC interface-enable
		 * initializations must be completed prior to starting this
		 * LMC CK local initialization (Step 5).
		 *
		 * LMC CK local initialization must be performed for each
		 * chip reset and whenever the DDR clock speed changes. This
		 * step needs to be performed only once, not once per LMC.
		 *
		 * There are special constraints on the ordering of DRESET
		 * initialization (Step 4) and CK local initialization
		 * (Step 5) whenever CK local initialization must be executed.
		 * CK local initialization must be executed whenever the
		 * DDR PLL is being brought up (for each chip reset and
		 * whenever the DDR clock speed changes).
		 *
		 * When Step 5 must be executed in the two-LMC mode case:
		 * - LMC0 DRESET initialization must occur before Step 5.
		 * - LMC1 DRESET initialization must occur after Step 5.
		 *
		 * When Step 5 must be executed in the four-LMC mode case:
		 * - LMC2 and LMC3 DRESET initialization must occur before
		 *   Step 5.
		 * - LMC0 and LMC1 DRESET initialization must occur after
		 *   Step 5.
		 *
		 * LMC CK local initialization is different depending on
		 * whether two-LMC or four-LMC modes are desired.
		 */
		if (if_mask == 0x3) {
			int temp_lmc_if_num = octeon_is_cpuid(OCTEON_CNF75XX) ?
				1 : 0;

			/*
			 * 5.9.5.1 LMC CK Local Initialization for Two-LMC
			 * Mode
			 *
			 * 1. Write LMC0_DLL_CTL3 to its reset value. (Note
			 * that LMC0_DLL_CTL3[DLL_90_BYTE_SEL] = 0x2 .. 0x8
			 * should also work.)
			 */
			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;

			if (octeon_is_cpuid(OCTEON_CNF75XX))
				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
			else
				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 1;

			lmc_wr(priv,
			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
			       ddr_dll_ctl3.u64);

			/*
			 * 2. Read LMC0_DLL_CTL3 and wait for the result.
			 */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));

			/*
			 * 3. Without changing any other fields in
			 * LMC0_DLL_CTL3, write
			 * LMC0_DLL_CTL3[DCLK90_FWD] = 1. Writing
			 * LMC0_DLL_CTL3[DCLK90_FWD] = 1
			 * causes clock-delay information to be forwarded
			 * from LMC0 to LMC1.
			 */
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			lmc_wr(priv,
			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
			       ddr_dll_ctl3.u64);

			/*
			 * 4. Read LMC0_DLL_CTL3 and wait for the result.
			 */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));
		}

		if (if_mask == 0xf) {
			/*
			 * 5.9.5.2 LMC CK Local Initialization for Four-LMC
			 * Mode
			 *
			 * 1. Write LMC2_DLL_CTL3 to its reset value except
			 * LMC2_DLL_CTL3[DLL90_BYTE_SEL] = 0x7.
			 */
			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);

			/*
			 * 2. Write LMC3_DLL_CTL3 to its reset value except
			 * LMC3_DLL_CTL3[DLL90_BYTE_SEL] = 0x2.
			 */
			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 2;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);

			/*
			 * 3. Read LMC3_DLL_CTL3 and wait for the result.
			 */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));

			/*
			 * 4. Without changing any other fields in
			 * LMC2_DLL_CTL3, write LMC2_DLL_CTL3[DCLK90_FWD] = 1
			 * and LMC2_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
			 * Writing LMC2_DLL_CTL3[DCLK90_FWD] = 1 causes LMC2
			 * to forward clock-delay information to LMC0. Setting
			 * LMC2_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC2
			 * from periodically recalibrating this delay
			 * information.
			 */
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(2));
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);

			/*
			 * 5. Without changing any other fields in
			 * LMC3_DLL_CTL3, write LMC3_DLL_CTL3[DCLK90_FWD] = 1
			 * and LMC3_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
			 * Writing LMC3_DLL_CTL3[DCLK90_FWD] = 1 causes LMC3
			 * to forward clock-delay information to LMC1. Setting
			 * LMC3_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC3
			 * from periodically recalibrating this delay
			 * information.
			 */
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);

			/*
			 * 6. Read LMC3_DLL_CTL3 and wait for the result.
			 */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
		}

		if (octeon_is_cpuid(OCTEON_CNF75XX)) {
			/*
			 * cnf75xx 2-LMC Mode: LMC0 DRESET must occur after
			 * Step 5; do LMC0 for 1-LMC mode here too
			 */
			cn78xx_lmc_dreset_init(priv, 0);
		}

		/* TWO-LMC MODE AFTER STEP 5 */
		if (if_mask == 0x3) {
			if (octeon_is_cpuid(OCTEON_CNF75XX)) {
				/*
				 * cnf75xx 2-LMC Mode: LMC0 DRESET must
				 * occur after Step 5
				 */
				cn78xx_lmc_dreset_init(priv, 0);
			} else {
				cn78xx_lmc_dreset_init(priv, 1);
			}
		}

		/* FOUR-LMC MODE AFTER STEP 5 */
		if (if_mask == 0xf) {
			cn78xx_lmc_dreset_init(priv, 0);
			cn78xx_lmc_dreset_init(priv, 1);

			/*
			 * Enable periodic recalibration of the DDR90 delay
			 * line.
			 */
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(0));
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(0), ddr_dll_ctl3.u64);
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(1));
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(1), ddr_dll_ctl3.u64);
		}

		/* Enable fine tune mode for all LMCs */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(i));
			ddr_dll_ctl3.cn78xx.fine_tune_mode = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(i), ddr_dll_ctl3.u64);
		}

		/*
		 * Enable the trim circuit on the appropriate channels to
		 * adjust the DDR clock duty cycle for chips that support
		 * it
		 */
		if (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X) ||
		    octeon_is_cpuid(OCTEON_CN73XX) ||
		    octeon_is_cpuid(OCTEON_CNF75XX)) {
			union cvmx_lmcx_phy_ctl lmc_phy_ctl;
			int i;

			for (i = 0; i < 4; ++i) {
				if ((if_mask & (1 << i)) == 0)
					continue;

				lmc_phy_ctl.u64 =
					lmc_rd(priv, CVMX_LMCX_PHY_CTL(i));

				if (octeon_is_cpuid(OCTEON_CNF75XX) ||
				    octeon_is_cpuid(OCTEON_CN73XX_PASS1_3)) {
					/* Both LMCs */
					lmc_phy_ctl.s.lv_mode = 0;
				} else {
					/* Odd LMCs = 0, Even LMCs = 1 */
					lmc_phy_ctl.s.lv_mode = (~i) & 1;
				}

				debug("LMC%d: PHY_CTL : 0x%016llx\n",
				      i, lmc_phy_ctl.u64);
				lmc_wr(priv, CVMX_LMCX_PHY_CTL(i),
				       lmc_phy_ctl.u64);
			}
		}
	}

	/*
	 * 5.9.6 LMC RESET Initialization
	 *
	 * NOTE: this is now done as the first step in
	 * init_octeon3_ddr3_interface, rather than the last step in clock
	 * init. This reorg allows restarting per-LMC initialization should
	 * problems be encountered, rather than being forced to resort to
	 * resetting the chip and starting all over.
	 *
	 * Look for the code in octeon3_lmc.c: perform_lmc_reset().
	 */

	/* Fallthrough for all interfaces... */
not_if0:

	/*
	 * Start the DDR clock so that its frequency can be measured.
	 * For some chips we must activate the memory controller with
	 * init_start to make the DDR clock start to run.
	 */
	if ((!octeon_is_cpuid(OCTEON_CN6XXX)) &&
	    (!octeon_is_cpuid(OCTEON_CNF7XXX)) &&
	    (!octeon_is_cpuid(OCTEON_CN7XXX))) {
		union cvmx_lmcx_mem_cfg0 mem_cfg0;

		mem_cfg0.u64 = 0;
		mem_cfg0.s.init_start = 1;
		lmc_wr(priv, CVMX_LMCX_MEM_CFG0(if_num), mem_cfg0.u64);
		lmc_rd(priv, CVMX_LMCX_MEM_CFG0(if_num));
	}

	set_ddr_clock_initialized(priv, if_num, 1);

	return 0;
}

static void octeon_ipd_delay_cycles(u64 cycles)
{
	u64 start = csr_rd(CVMX_IPD_CLK_COUNT);

	while (start + cycles > csr_rd(CVMX_IPD_CLK_COUNT))
		;
}

static void octeon_ipd_delay_cycles_o3(u64 cycles)
{
	u64 start = csr_rd(CVMX_FPA_CLK_COUNT);

	while (start + cycles > csr_rd(CVMX_FPA_CLK_COUNT))
		;
}
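
/*
 * Note (illustrative): both helpers busy-wait on a free-running clock
 * counter; the "start + cycles" comparison assumes the 64-bit counter does
 * not wrap during the delay, which holds for any practical boot-time delay.
 */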

static u32 measure_octeon_ddr_clock(struct ddr_priv *priv,
				    struct ddr_conf *ddr_conf, u32 cpu_hertz,
				    u32 ddr_hertz, u32 ddr_ref_hertz,
				    int if_num, u32 if_mask)
{
	u64 core_clocks;
	u64 ddr_clocks;
	u64 calc_ddr_hertz;

	if (ddr_conf) {
		if (initialize_ddr_clock(priv, ddr_conf, cpu_hertz,
					 ddr_hertz, ddr_ref_hertz, if_num,
					 if_mask) != 0)
			return 0;
	}
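
	/*
	 * Measurement sketch (illustrative): sample a free-running core-side
	 * counter and the LMC DCLK counter, delay for a fixed number of
	 * core-side cycles, sample both again, and scale the known reference
	 * clock by the ratio of the two deltas:
	 *
	 *   calc_ddr_hertz = ddr_clocks * ref_clock / core_clocks
	 *
	 * where ref_clock is gd->bus_clk (or cpu_hertz on older parts).
	 */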
  1266. /* Dynamically determine the DDR clock speed */
  1267. if (OCTEON_IS_OCTEON2() || octeon_is_cpuid(OCTEON_CN70XX)) {
  1268. core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);
  1269. ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));
  1270. /* How many cpu cycles to measure over */
  1271. octeon_ipd_delay_cycles(100000000);
  1272. core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
  1273. ddr_clocks =
  1274. lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
  1275. calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
  1276. } else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
  1277. core_clocks = csr_rd(CVMX_FPA_CLK_COUNT);
  1278. ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));
  1279. /* How many cpu cycles to measure over */
  1280. octeon_ipd_delay_cycles_o3(100000000);
  1281. core_clocks = csr_rd(CVMX_FPA_CLK_COUNT) - core_clocks;
  1282. ddr_clocks =
  1283. lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
  1284. calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
  1285. } else {
  1286. core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);
  1287. /*
  1288. * ignore overflow, starts counting when we enable the
  1289. * controller
  1290. */
  1291. ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num));
  1292. /* How many cpu cycles to measure over */
  1293. octeon_ipd_delay_cycles(100000000);
  1294. core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
  1295. ddr_clocks =
  1296. lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num)) - ddr_clocks;
  1297. calc_ddr_hertz = ddr_clocks * cpu_hertz / core_clocks;
  1298. }
  1299. debug("core clocks: %llu, ddr clocks: %llu, calc rate: %llu\n",
  1300. core_clocks, ddr_clocks, calc_ddr_hertz);
  1301. debug("LMC%d: Measured DDR clock: %lld, cpu clock: %u, ddr clocks: %llu\n",
  1302. if_num, calc_ddr_hertz, cpu_hertz, ddr_clocks);
  1303. /* Check for unreasonable settings. */
  1304. if (calc_ddr_hertz < 10000) {
  1305. udelay(8000000 * 100);
  1306. printf("DDR clock misconfigured on interface %d. Resetting...\n",
  1307. if_num);
  1308. do_reset(NULL, 0, 0, NULL);
  1309. }
  1310. return calc_ddr_hertz;
  1311. }
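/*
 * A quick worked example of the ratio math above (illustrative numbers
 * only): with the reference counter running at 1000 MHz, waiting for
 * 100000000 of its cycles takes 0.1 s. If the LMC DCLK counter advanced
 * by 66666667 in that window, then
 *
 *	calc_ddr_hertz = 66666667 * 1000000000 / 100000000 ~= 666.7 MHz
 *
 * i.e. the DDR clock rate is measured rather than trusted from the
 * configuration, and the measured value feeds the later timing math.
 */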
u64 lmc_ddr3_rl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
{
	union cvmx_lmcx_rlevel_dbg rlevel_dbg;
	union cvmx_lmcx_rlevel_ctl rlevel_ctl;

	rlevel_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));
	rlevel_ctl.s.byte = idx;

	lmc_wr(priv, CVMX_LMCX_RLEVEL_CTL(if_num), rlevel_ctl.u64);
	lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));

	rlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_DBG(if_num));

	return rlevel_dbg.s.bitmask;
}

u64 lmc_ddr3_wl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
{
	union cvmx_lmcx_wlevel_dbg wlevel_dbg;

	wlevel_dbg.u64 = 0;
	wlevel_dbg.s.byte = idx;

	lmc_wr(priv, CVMX_LMCX_WLEVEL_DBG(if_num), wlevel_dbg.u64);
	lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));

	wlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));

	return wlevel_dbg.s.bitmask;
}
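/*
 * Both debug reads follow the same select-then-sample pattern: write the
 * byte-lane index, read the register back so the write is known to have
 * taken effect, then read the resulting leveling bitmask for that lane.
 * The intermediate lmc_rd() calls are there only for this ordering, not
 * for their values.
 */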
int validate_ddr3_rlevel_bitmask(struct rlevel_bitmask *rlevel_bitmask_p,
				 int ddr_type)
{
	int i;
	int errors = 0;
	u64 mask = 0;		/* Used in 64-bit comparisons */
	u8 mstart = 0;
	u8 width = 0;
	u8 firstbit = 0;
	u8 lastbit = 0;
	u8 bubble = 0;
	u8 tbubble = 0;
	u8 blank = 0;
	u8 narrow = 0;
	u8 trailing = 0;
	u64 bitmask = rlevel_bitmask_p->bm;
	u8 extras = 0;
	u8 toolong = 0;
	u64 temp;

	if (bitmask == 0) {
		blank += RLEVEL_BITMASK_BLANK_ERROR;
	} else {
		/* Look for fb, the first bit */
		temp = bitmask;
		while (!(temp & 1)) {
			firstbit++;
			temp >>= 1;
		}

		/* Look for lb, the last bit */
		lastbit = firstbit;
		while ((temp >>= 1))
			lastbit++;

		/*
		 * Start with the max range to try to find the largest mask
		 * within the bitmask data
		 */
		width = MASKRANGE_BITS;
		for (mask = MASKRANGE; mask > 0; mask >>= 1, --width) {
			for (mstart = lastbit - width + 1; mstart >= firstbit;
			     --mstart) {
				temp = mask << mstart;
				if ((bitmask & temp) == temp)
					goto done_now;
			}
		}
done_now:
		/* Look for any more contiguous 1's to the right of mstart */
		if (width == MASKRANGE_BITS) {	/* only when maximum mask */
			while ((bitmask >> (mstart - 1)) & 1) {
				/* Slide right over more 1's */
				--mstart;
				/* Count the number of extra bits only for DDR4 */
				if (ddr_type == DDR4_DRAM)
					extras++;
			}
		}

		/* Penalize any extra 1's beyond the maximum desired mask */
		if (extras > 0)
			toolong =
				RLEVEL_BITMASK_TOOLONG_ERROR * ((1 << extras) - 1);

		/* Detect if bitmask is too narrow. */
		if (width < 4)
			narrow = (4 - width) * RLEVEL_BITMASK_NARROW_ERROR;

		/*
		 * Detect leading bubble bits, that is, any 0's between first
		 * and mstart
		 */
		temp = bitmask >> (firstbit + 1);
		i = mstart - firstbit - 1;
		while (--i >= 0) {
			if ((temp & 1) == 0)
				bubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
			temp >>= 1;
		}

		temp = bitmask >> (mstart + width + extras);
		i = lastbit - (mstart + width + extras - 1);
		while (--i >= 0) {
			if (temp & 1) {
				/*
				 * Detect 1 bits after the trailing end of
				 * the mask, including last.
				 */
				trailing += RLEVEL_BITMASK_TRAILING_BITS_ERROR;
			} else {
				/*
				 * Detect trailing bubble bits, that is,
				 * any 0's between end-of-mask and last
				 */
				tbubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
			}
			temp >>= 1;
		}
	}

	errors = bubble + tbubble + blank + narrow + trailing + toolong;

	/* Pass out useful statistics */
	rlevel_bitmask_p->mstart = mstart;
	rlevel_bitmask_p->width = width;

	debug_bitmask_print("bm:%08lx mask:%02lx, width:%2u, mstart:%2d, fb:%2u, lb:%2u (bu:%2d, tb:%2d, bl:%2d, n:%2d, t:%2d, x:%2d) errors:%3d %s\n",
			    (unsigned long)bitmask, mask, width, mstart,
			    firstbit, lastbit, bubble, tbubble, blank,
			    narrow, trailing, toolong, errors,
			    (errors) ? "=> invalid" : "");

	return errors;
}
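/*
 * Worked example: bm = 0x3c (binary 111100) has firstbit = 2 and
 * lastbit = 5. The widest all-ones window the search can place is 0xf at
 * mstart = 2, giving width = 4. There are no stray bits inside or outside
 * that window and it is not narrower than 4, so every penalty term stays 0
 * and the bitmask scores as valid. By contrast, bm = 0x34 (110100) settles
 * on the two-bit window at mstart = 4 and is charged both a bubble penalty
 * (the 0 at bit 3) and a narrow penalty (width 2 < 4).
 */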
int compute_ddr3_rlevel_delay(u8 mstart, u8 width,
			      union cvmx_lmcx_rlevel_ctl rlevel_ctl)
{
	int delay;

	debug_bitmask_print(" offset_en:%d", rlevel_ctl.s.offset_en);

	if (rlevel_ctl.s.offset_en) {
		delay = max((int)mstart,
			    (int)(mstart + width - 1 - rlevel_ctl.s.offset));
	} else {
		/* if (rlevel_ctl.s.offset) { */	/* Experimental */
		if (0) {
			delay = max(mstart + rlevel_ctl.s.offset, mstart + 1);
			/*
			 * Ensure that the offset delay falls within the
			 * bitmask
			 */
			delay = min(delay, mstart + width - 1);
		} else {
			/* Round down */
			delay = (width - 1) / 2 + mstart;
		}
	}

	return delay;
}
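/*
 * Worked example: with mstart = 10 and width = 5, the default path picks
 * the centre of the window rounded down: (5 - 1) / 2 + 10 = 12. With
 * offset_en = 1 and offset = 1 it instead computes
 * max(10, 10 + 5 - 1 - 1) = 13, i.e. a delay backed off from the trailing
 * edge of the window rather than taken from its middle.
 */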
/*
 * Default ODT config must disable ODT. It must be const (read only) so
 * that the structure is kept in flash.
 */
const struct dimm_odt_config disable_odt_config[] = {
	/* 1 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	/* 2 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	/* 3 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	/* 4 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
};
/* Memory controller setup function */
static int init_octeon_dram_interface(struct ddr_priv *priv,
				      struct ddr_conf *ddr_conf,
				      u32 ddr_hertz, u32 cpu_hertz,
				      u32 ddr_ref_hertz, int if_num,
				      u32 if_mask)
{
	u32 mem_size_mbytes = 0;
	char *s;

	s = lookup_env(priv, "ddr_timing_hertz");
	if (s)
		ddr_hertz = simple_strtoul(s, NULL, 0);

	if (OCTEON_IS_OCTEON3()) {
		int lmc_restart_retries = 0;
#define DEFAULT_RESTART_RETRIES 3
		int lmc_restart_retries_limit = DEFAULT_RESTART_RETRIES;

		s = lookup_env(priv, "ddr_restart_retries_limit");
		if (s)
			lmc_restart_retries_limit = simple_strtoul(s, NULL, 0);

restart_lmc_init:
		mem_size_mbytes = init_octeon3_ddr3_interface(priv, ddr_conf,
							      ddr_hertz,
							      cpu_hertz,
							      ddr_ref_hertz,
							      if_num, if_mask);
		if (mem_size_mbytes == 0) {	/* 0 means restart is possible */
			if (lmc_restart_retries < lmc_restart_retries_limit) {
				lmc_restart_retries++;
				printf("N0.LMC%d Configuration problem: attempting LMC reset and init restart %d\n",
				       if_num, lmc_restart_retries);
				goto restart_lmc_init;
			} else {
				if (lmc_restart_retries_limit > 0) {
					printf("INFO: N0.LMC%d Configuration: fatal problem remains after %d LMC init retries - Resetting node...\n",
					       if_num, lmc_restart_retries);
					mdelay(500);
					do_reset(NULL, 0, 0, NULL);
				} else {
					/* Return an error, no restart */
					mem_size_mbytes = -1;
				}
			}
		}
	}

	debug("N0.LMC%d Configuration Completed: %d MB\n",
	      if_num, mem_size_mbytes);

	return mem_size_mbytes;
}
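/*
 * Note on the retry policy above: init_octeon3_ddr3_interface() returning 0
 * means "a restart may help". The environment variable
 * ddr_restart_retries_limit tunes how many soft restarts are attempted
 * before the whole board is reset; a limit of 0 skips both the retries and
 * the reset and simply reports the failure (-1) to the caller.
 */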
#define WLEVEL_BYTE_BITS	5
#define WLEVEL_BYTE_MSK		((1ULL << 5) - 1)
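/*
 * The WLEVEL_RANKX register packs one 5-bit write-leveling delay per byte
 * lane: lane N lives in bits [5N+4:5N]. For example, lane 2 occupies bits
 * 14:10, so updating it means clearing WLEVEL_BYTE_MSK << 10 and OR-ing in
 * the new delay at the same position, which is exactly what upd_wl_rank()
 * below does.
 */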
void upd_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank,
		 int byte, int delay)
{
	union cvmx_lmcx_wlevel_rankx temp_wlevel_rank;

	if (byte >= 0 && byte <= 8) {
		temp_wlevel_rank.u64 = lmc_wlevel_rank->u64;
		temp_wlevel_rank.u64 &=
			~(WLEVEL_BYTE_MSK << (WLEVEL_BYTE_BITS * byte));
		temp_wlevel_rank.u64 |=
			((delay & WLEVEL_BYTE_MSK) << (WLEVEL_BYTE_BITS * byte));
		lmc_wlevel_rank->u64 = temp_wlevel_rank.u64;
	}
}

int get_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
{
	int delay = 0;

	if (byte >= 0 && byte <= 8)
		delay =
			((lmc_wlevel_rank->u64) >> (WLEVEL_BYTE_BITS *
						    byte)) & WLEVEL_BYTE_MSK;

	return delay;
}
void upd_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
		 int byte, int delay)
{
	union cvmx_lmcx_rlevel_rankx temp_rlevel_rank;

	if (byte >= 0 && byte <= 8) {
		temp_rlevel_rank.u64 =
			lmc_rlevel_rank->u64 & ~(RLEVEL_BYTE_MSK <<
						 (RLEVEL_BYTE_BITS * byte));
		temp_rlevel_rank.u64 |=
			((delay & RLEVEL_BYTE_MSK) << (RLEVEL_BYTE_BITS * byte));
		lmc_rlevel_rank->u64 = temp_rlevel_rank.u64;
	}
}

int get_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte)
{
	int delay = 0;

	if (byte >= 0 && byte <= 8)
		delay =
			((lmc_rlevel_rank->u64) >> (RLEVEL_BYTE_BITS *
						    byte)) & RLEVEL_BYTE_MSK;

	return delay;
}
void rlevel_to_wlevel(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
		      union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
{
	int byte_delay = get_rl_rank(lmc_rlevel_rank, byte);

	debug("Estimating Wlevel delay byte %d: ", byte);
	debug("Rlevel=%d => ", byte_delay);
	byte_delay = divide_roundup(byte_delay, 2) & 0x1e;
	debug("Wlevel=%d\n", byte_delay);
	upd_wl_rank(lmc_wlevel_rank, byte, byte_delay);
}
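/*
 * Worked example: a read-leveling delay of 14 gives
 * divide_roundup(14, 2) = 7, and 7 & 0x1e = 6, so the estimated
 * write-leveling delay is 6. In other words: roughly half the read delay,
 * forced to an even value since the & 0x1e clears bit 0.
 */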
/* Delay trend: constant=0, decreasing=-1, increasing=1 */
static s64 calc_delay_trend(s64 v)
{
	if (v == 0)
		return 0;
	if (v < 0)
		return -1;

	return 1;
}

/*
 * Evaluate delay sequence across the whole range of byte delays while
 * keeping track of the overall delay trend, increasing or decreasing.
 * If the trend changes charge an error amount to the score.
 *
 * NOTE: the "max_adj_delay_inc" argument is, by default, 1 for DDR3 and
 * 2 for DDR4.
 */
int nonseq_del(struct rlevel_byte_data *rlevel_byte, int start, int end,
	       int max_adj_delay_inc)
{
	s64 error = 0;
	s64 delay_trend, prev_trend = 0;
	int byte_idx;
	s64 seq_err;
	s64 adj_err;
	s64 delay_inc;
	s64 delay_diff;

	for (byte_idx = start; byte_idx < end; ++byte_idx) {
		delay_diff = rlevel_byte[byte_idx + 1].delay -
			rlevel_byte[byte_idx].delay;
		delay_trend = calc_delay_trend(delay_diff);

		/*
		 * Increment error each time the trend changes to the
		 * opposite direction.
		 */
		if (prev_trend != 0 && delay_trend != 0 &&
		    prev_trend != delay_trend) {
			seq_err = RLEVEL_NONSEQUENTIAL_DELAY_ERROR;
		} else {
			seq_err = 0;
		}

		/* How big was the delay change, if any */
		delay_inc = abs(delay_diff);

		/*
		 * Even if the trend did not change to the opposite direction,
		 * check for the magnitude of the change, and scale the
		 * penalty by the amount that the size is larger than the
		 * provided limit.
		 */
		if (max_adj_delay_inc != 0 && delay_inc > max_adj_delay_inc) {
			adj_err = (delay_inc - max_adj_delay_inc) *
				RLEVEL_ADJACENT_DELAY_ERROR;
		} else {
			adj_err = 0;
		}

		rlevel_byte[byte_idx + 1].sqerrs = seq_err + adj_err;
		error += seq_err + adj_err;

		debug_bitmask_print("Byte %d: %d, Byte %d: %d, delay_trend: %ld, prev_trend: %ld, [%ld/%ld]%s%s\n",
				    byte_idx + 0,
				    rlevel_byte[byte_idx + 0].delay,
				    byte_idx + 1,
				    rlevel_byte[byte_idx + 1].delay,
				    delay_trend,
				    prev_trend, seq_err, adj_err,
				    (seq_err) ?
				    " => Nonsequential byte delay" : "",
				    (adj_err) ?
				    " => Adjacent delay error" : "");

		if (delay_trend != 0)
			prev_trend = delay_trend;
	}

	return (int)error;
}
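/*
 * Example of the scoring: byte delays {5, 6, 6, 4} produce per-pair trends
 * of +1, 0, -1. The swing from +1 to -1 charges one
 * RLEVEL_NONSEQUENTIAL_DELAY_ERROR, and with max_adj_delay_inc = 1 (the
 * DDR3 default) the final 6 -> 4 step additionally charges
 * (2 - 1) * RLEVEL_ADJACENT_DELAY_ERROR for exceeding the allowed
 * per-byte jump.
 */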
int roundup_ddr3_wlevel_bitmask(int bitmask)
{
	int shifted_bitmask;
	int leader;
	int delay;

	/* Find the first clear bit (the end of any run starting at bit 0) */
	for (leader = 0; leader < 8; ++leader) {
		shifted_bitmask = (bitmask >> leader);
		if ((shifted_bitmask & 1) == 0)
			break;
	}

	/* Find the next rising edge, allowing the search to wrap */
	for (; leader < 16; ++leader) {
		shifted_bitmask = (bitmask >> (leader % 8));
		if (shifted_bitmask & 1)
			break;
	}

	/* Round odd edge positions up to the next even delay */
	delay = (leader & 1) ? leader + 1 : leader;
	delay = delay % 8;

	return delay;
}
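/*
 * Example: bitmask 0xe1 (binary 11100001). The first loop stops at the
 * first clear bit (leader = 1); the second loop then finds the next rising
 * edge at bit 5, the "% 8" letting the search wrap past bit 7 if needed.
 * 5 is odd, so it is rounded up to 6, and 6 % 8 = 6 is returned as the
 * write-leveling delay.
 */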
/* Octeon 2 */
static void oct2_ddr3_seq(struct ddr_priv *priv, int rank_mask, int if_num,
			  int sequence)
{
	char *s;

#ifdef DEBUG_PERFORM_DDR3_SEQUENCE
	static const char * const sequence_str[] = {
		"power-up/init",
		"read-leveling",
		"self-refresh entry",
		"self-refresh exit",
		"precharge power-down entry",
		"precharge power-down exit",
		"write-leveling",
		"illegal"
	};
#endif

	union cvmx_lmcx_control lmc_control;
	union cvmx_lmcx_config lmc_config;
	int save_ddr2t;

	lmc_control.u64 = lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
	save_ddr2t = lmc_control.s.ddr2t;

	if (save_ddr2t == 0 && octeon_is_cpuid(OCTEON_CN63XX_PASS1_X)) {
		/*
		 * Some register parts (IDT and TI included) do not like
		 * the sequence that LMC generates for an MRS register
		 * write in 1T mode. In this case, the register part does
		 * not properly forward the MRS register write to the DRAM
		 * parts. See errata (LMC-14548) Issues with registered
		 * DIMMs.
		 */
		debug("Forcing DDR 2T during init seq. Re: Pass 1 LMC-14548\n");
		lmc_control.s.ddr2t = 1;
	}

	s = lookup_env(priv, "ddr_init_2t");
	if (s)
		lmc_control.s.ddr2t = simple_strtoul(s, NULL, 0);

	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);

	lmc_config.u64 = lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));

	lmc_config.s.init_start = 1;
	if (OCTEON_IS_OCTEON2())
		lmc_config.cn63xx.sequence = sequence;
	lmc_config.s.rankmask = rank_mask;

#ifdef DEBUG_PERFORM_DDR3_SEQUENCE
	debug("Performing LMC sequence: rank_mask=0x%02x, sequence=%d, %s\n",
	      rank_mask, sequence, sequence_str[sequence]);
#endif

	lmc_wr(priv, CVMX_LMCX_CONFIG(if_num), lmc_config.u64);
	lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));
	udelay(600);	/* Wait a while */

	lmc_control.s.ddr2t = save_ddr2t;
	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);
	lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
}
/* Check to see if any custom offset values are used */
static int is_dll_offset_provided(const int8_t *dll_offset_table)
{
	int i;

	if (!dll_offset_table)	/* Check for pointer to table. */
		return 0;

	for (i = 0; i < 9; ++i) {
		if (dll_offset_table[i] != 0)
			return 1;
	}

	return 0;
}
void change_dll_offset_enable(struct ddr_priv *priv, int if_num, int change)
{
	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;

	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	SET_DDR_DLL_CTL3(offset_ena, !!change);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
}
unsigned short load_dll_offset(struct ddr_priv *priv, int if_num,
			       int dll_offset_mode, int byte_offset, int byte)
{
	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
	int field_width = 6;
	/*
	 * byte_sel:
	 * 0x1 = byte 0, ..., 0x9 = byte 8
	 * 0xA = all bytes
	 */
	int byte_sel = (byte == 10) ? byte : byte + 1;

	if (octeon_is_cpuid(OCTEON_CN6XXX))
		field_width = 5;

	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	SET_DDR_DLL_CTL3(load_offset, 0);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));

	SET_DDR_DLL_CTL3(mode_sel, dll_offset_mode);
	SET_DDR_DLL_CTL3(offset,
			 (abs(byte_offset) & (~(-1 << field_width))) |
			 (_sign(byte_offset) << field_width));
	SET_DDR_DLL_CTL3(byte_sel, byte_sel);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));

	SET_DDR_DLL_CTL3(load_offset, 1);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));

	return (unsigned short)GET_DDR_DLL_CTL3(offset);
}
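/*
 * The offset field written above is sign-magnitude encoded: the low
 * field_width bits hold abs(byte_offset) and the next bit up holds the
 * sign (assuming _sign() evaluates to 1 for negative values, as its use
 * here implies). For example, byte_offset = -5 with the default field
 * width of 6 encodes as (5 & 0x3f) | (1 << 6) = 0x45, while +5 encodes
 * simply as 0x05.
 */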
void process_custom_dll_offsets(struct ddr_priv *priv, int if_num,
				const char *enable_str,
				const int8_t *offsets, const char *byte_str,
				int mode)
{
	const char *s;
	int enabled;
	int provided;
	int byte_offset;
	unsigned short offset[9] = { 0 };
	int byte;

	s = lookup_env(priv, enable_str);
	if (s)
		enabled = !!simple_strtol(s, NULL, 0);
	else
		enabled = -1;

	/*
	 * enabled == -1: no override, do only configured offsets if provided
	 * enabled ==  0: override OFF, do NOT do it even if configured
	 *                offsets provided
	 * enabled ==  1: override ON, do it for overrides plus configured
	 *                offsets
	 */

	if (enabled == 0)
		return;

	provided = is_dll_offset_provided(offsets);

	if (enabled < 0 && !provided)
		return;

	change_dll_offset_enable(priv, if_num, 0);

	for (byte = 0; byte < 9; ++byte) {
		/* Always take the provided offset, if available */
		byte_offset = (provided) ? offsets[byte] : 0;

		/* Then, if enabled, use any overrides present */
		if (enabled > 0) {
			s = lookup_env(priv, byte_str, if_num, byte);
			if (s)
				byte_offset = simple_strtol(s, NULL, 0);
		}

		offset[byte] =
			load_dll_offset(priv, if_num, mode, byte_offset, byte);
	}

	change_dll_offset_enable(priv, if_num, 1);

	debug("N0.LMC%d: DLL %s Offset 8:0 : 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
	      if_num, (mode == 2) ? "Read " : "Write",
	      offset[8], offset[7], offset[6], offset[5], offset[4],
	      offset[3], offset[2], offset[1], offset[0]);
}
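/*
 * A typical call (names purely illustrative, not taken from this file)
 * might look like:
 *
 *	process_custom_dll_offsets(priv, if_num, "ddr_dll_write_offset",
 *				   cfg->dll_write_offset,
 *				   "ddr%d_dll_write_offset_byte%d", 1);
 *
 * which would apply board-configured write offsets, let the environment
 * override them per byte lane, or suppress them entirely, following the
 * three-state "enabled" logic documented in the function body.
 */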
void ddr_init_seq(struct ddr_priv *priv, int rank_mask, int if_num)
{
	char *s;
	int ddr_init_loops = 1;
	int rankx;

	s = lookup_env(priv, "ddr%d_init_loops", if_num);
	if (s)
		ddr_init_loops = simple_strtoul(s, NULL, 0);

	while (ddr_init_loops--) {
		for (rankx = 0; rankx < 8; rankx++) {
			if (!(rank_mask & (1 << rankx)))
				continue;

			if (OCTEON_IS_OCTEON3()) {
				/* power-up/init */
				oct3_ddr3_seq(priv, 1 << rankx, if_num, 0);
			} else {
				/* power-up/init */
				oct2_ddr3_seq(priv, 1 << rankx, if_num, 0);
			}

			udelay(1000);	/* Wait a while. */

			s = lookup_env(priv, "ddr_sequence1");
			if (s) {
				int sequence1;

				sequence1 = simple_strtoul(s, NULL, 0);

				if (OCTEON_IS_OCTEON3()) {
					oct3_ddr3_seq(priv, 1 << rankx,
						      if_num, sequence1);
				} else {
					oct2_ddr3_seq(priv, 1 << rankx,
						      if_num, sequence1);
				}
			}

			s = lookup_env(priv, "ddr_sequence2");
			if (s) {
				int sequence2;

				sequence2 = simple_strtoul(s, NULL, 0);

				if (OCTEON_IS_OCTEON3())
					oct3_ddr3_seq(priv, 1 << rankx,
						      if_num, sequence2);
				else
					oct2_ddr3_seq(priv, 1 << rankx,
						      if_num, sequence2);
			}
		}
	}
}
static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,
				 u32 ddr_hertz, u32 ddr_ref_hertz,
				 u32 if_mask,
				 struct ddr_conf *ddr_conf,
				 u32 *measured_ddr_hertz)
{
	u32 ddr_conf_valid_mask = 0;
	int memsize_mbytes = 0;
	char *eptr;
	int if_idx;
	u32 ddr_max_speed = 667000000;
	u32 calc_ddr_hertz = -1;
	int val;
	int ret;

	if (env_get("ddr_verbose") || env_get("ddr_prompt"))
		priv->flags |= FLAG_DDR_VERBOSE;

#ifdef DDR_VERBOSE
	priv->flags |= FLAG_DDR_VERBOSE;
#endif

	if (env_get("ddr_trace_init")) {
		printf("Parameter ddr_trace_init found in environment.\n");
		priv->flags |= FLAG_DDR_TRACE_INIT;
		priv->flags |= FLAG_DDR_VERBOSE;
	}

	priv->flags |= FLAG_DDR_DEBUG;

	val = env_get_ulong("ddr_debug", 10, (u32)-1);
	switch (val) {
	case 0:
		priv->flags &= ~FLAG_DDR_DEBUG;
		printf("Parameter ddr_debug clear in environment\n");
		break;
	case (u32)-1:	/* Not set in environment */
		break;
	default:
		printf("Parameter ddr_debug set in environment\n");
		priv->flags |= FLAG_DDR_DEBUG;
		priv->flags |= FLAG_DDR_VERBOSE;
		break;
	}

	if (env_get("ddr_prompt"))
		priv->flags |= FLAG_DDR_PROMPT;

	/* Force ddr_verbose for failsafe debugger */
	if (priv->flags & FLAG_FAILSAFE_MODE)
		priv->flags |= FLAG_DDR_VERBOSE;

#ifdef DDR_DEBUG
	priv->flags |= FLAG_DDR_DEBUG;
	/* Keep verbose on while we are still debugging. */
	priv->flags |= FLAG_DDR_VERBOSE;
#endif

	if ((octeon_is_cpuid(OCTEON_CN61XX) ||
	     octeon_is_cpuid(OCTEON_CNF71XX)) && ddr_max_speed > 533333333) {
		ddr_max_speed = 533333333;
	} else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
		/* Override speed restrictions to support internal testing. */
		ddr_max_speed = 1210000000;
	}

	if (ddr_hertz > ddr_max_speed) {
		printf("DDR clock speed %u exceeds maximum supported DDR speed, reducing to %uHz\n",
		       ddr_hertz, ddr_max_speed);
		ddr_hertz = ddr_max_speed;
	}

	if (OCTEON_IS_OCTEON3()) {	/* Restrict this check to Octeon 3 */
		if (ddr_hertz > cpu_hertz) {
			printf("\nFATAL ERROR: DDR speed %u exceeds CPU speed %u, exiting...\n\n",
			       ddr_hertz, cpu_hertz);
			return -1;
		}
	}
	/* Enable or disable L2 ECC based on the environment */
	eptr = env_get("disable_l2_ecc");
	if (eptr) {
		union cvmx_l2c_ctl l2c_val;

		printf("Disabling L2 ECC based on disable_l2_ecc environment variable\n");
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disecc = 1;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	} else {
		union cvmx_l2c_ctl l2c_val;

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disecc = 0;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	}

	/*
	 * Init the L2C, must be done before DRAM access so that we
	 * know L2 is empty
	 */
	eptr = env_get("disable_l2_index_aliasing");
	if (eptr) {
		union cvmx_l2c_ctl l2c_val;

		puts("L2 index aliasing disabled.\n");
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disidxalias = 1;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	} else {
		union cvmx_l2c_ctl l2c_val;

		/* Enable L2C index aliasing */
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disidxalias = 0;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	}
	if (OCTEON_IS_OCTEON3()) {
		/*
		 * rdf_cnt: Defines the sample point of the LMC response data
		 * in the DDR-clock/core-clock crossing. For optimal
		 * performance set to 10 * (DDR-clock period/core-clock
		 * period) - 1. To disable set to 0. All other values
		 * are reserved.
		 */
		union cvmx_l2c_ctl l2c_ctl;
		u64 rdf_cnt;
		char *s;

		l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);

		/*
		 * It is more convenient to compute the ratio using clock
		 * frequencies rather than clock periods.
		 */
		rdf_cnt = (((u64)10 * cpu_hertz) / ddr_hertz) - 1;
		rdf_cnt = rdf_cnt < 256 ? rdf_cnt : 255;
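		/*
		 * Worked example with illustrative clocks: cpu_hertz =
		 * 1200000000 and ddr_hertz = 800000000 give
		 * 10 * 1200000000 / 800000000 = 15, minus 1 = 14, well
		 * below the 255 clamp.
		 */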
		l2c_ctl.cn78xx.rdf_cnt = rdf_cnt;

		s = lookup_env(priv, "early_fill_count");
		if (s)
			l2c_ctl.cn78xx.rdf_cnt = simple_strtoul(s, NULL, 0);

		debug("%-45s : %d, cpu_hertz:%d, ddr_hertz:%d\n",
		      "EARLY FILL COUNT ", l2c_ctl.cn78xx.rdf_cnt, cpu_hertz,
		      ddr_hertz);
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_ctl.u64);
	}
	/* Check for lower DIMM socket populated */
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		if ((if_mask & (1 << if_idx)) &&
		    validate_dimm(priv,
				  &ddr_conf[(int)if_idx].dimm_config_table[0],
				  0))
			ddr_conf_valid_mask |= (1 << if_idx);
	}

	if (octeon_is_cpuid(OCTEON_CN68XX) || octeon_is_cpuid(OCTEON_CN78XX)) {
		int four_lmc_mode = 1;
		char *s;

		if (priv->flags & FLAG_FAILSAFE_MODE)
			four_lmc_mode = 0;

		/*
		 * Pass 1.0 disables four-LMC mode.
		 * See errata (LMC-15811).
		 */
		if (octeon_is_cpuid(OCTEON_CN68XX_PASS1_0))
			four_lmc_mode = 0;

		s = env_get("ddr_four_lmc");
		if (s) {
			four_lmc_mode = simple_strtoul(s, NULL, 0);
			printf("Parameter found in environment. ddr_four_lmc = %d\n",
			       four_lmc_mode);
		}

		if (!four_lmc_mode) {
			puts("Forcing two-LMC Mode.\n");
			/* Invalidate LMC[2:3] */
			ddr_conf_valid_mask &= ~(3 << 2);
		}
	} else if (octeon_is_cpuid(OCTEON_CN73XX)) {
		int one_lmc_mode = 0;
		char *s;

		s = env_get("ddr_one_lmc");
		if (s) {
			one_lmc_mode = simple_strtoul(s, NULL, 0);
			printf("Parameter found in environment. ddr_one_lmc = %d\n",
			       one_lmc_mode);
		}

		if (one_lmc_mode) {
			puts("Forcing one-LMC Mode.\n");
			/* Invalidate LMC1 */
			ddr_conf_valid_mask &= ~(1 << 1);
		}
	}

	if (!ddr_conf_valid_mask) {
		printf("ERROR: No valid DIMMs detected on any DDR interface.\n");
		hang();
		return -1;	/* not reached: hang() does not return */
	}
	/*
	 * We measure the DDR frequency by counting DDR clocks. We can
	 * confirm or adjust the expected frequency as necessary. We use
	 * the measured frequency to make accurate timing calculations
	 * used to configure the controller.
	 */
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		u32 tmp_hertz;

		if (!(ddr_conf_valid_mask & (1 << if_idx)))
			continue;

try_again:
		/*
		 * Only check for alternate refclk wanted on chips that
		 * support it
		 */
		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
			/* We only need to do this if we are LMC0 */
			if (if_idx == 0) {
				union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;

				ddr_pll_ctl.u64 =
					lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));

				/*
				 * If we are asking for 100 MHz refclk, we can
				 * only get it via alternate, so switch to it
				 */
				if (ddr_ref_hertz == 100000000) {
					ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
						1;
					lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
					       ddr_pll_ctl.u64);
					udelay(1000);	/* Wait 1 msec */
				} else {
					/*
					 * If we are NOT asking for 100MHz,
					 * then reset to (assumed) 50MHz and go
					 * on
					 */
					ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
						0;
					lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
					       ddr_pll_ctl.u64);
					udelay(1000);	/* Wait 1 msec */
				}
			}
		} else {
			if (ddr_ref_hertz == 100000000) {
				debug("N0: DRAM init: requested 100 MHz refclk NOT SUPPORTED\n");
				ddr_ref_hertz = CONFIG_REF_HERTZ;
			}
		}

		tmp_hertz = measure_octeon_ddr_clock(priv, &ddr_conf[if_idx],
						     cpu_hertz, ddr_hertz,
						     ddr_ref_hertz, if_idx,
						     ddr_conf_valid_mask);

		/*
		 * Only check for alternate refclk acquired on chips that
		 * support it
		 */
		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
			/*
			 * If we are LMC0 and we are asked for 100 MHz refclk,
			 * we must be sure it is available.
			 * If not, we print an error message, set to 50MHz,
			 * and go on...
			 */
			if (if_idx == 0 && ddr_ref_hertz == 100000000) {
				/*
				 * Validate that the clock returned is close
				 * enough to the clock desired
				 */
				// FIXME: is 5% close enough?
				int hertz_diff =
					abs((int)tmp_hertz - (int)ddr_hertz);

				if (hertz_diff > ((int)ddr_hertz * 5 / 100)) {
					/* No, diff is greater than 5% */
					debug("N0: DRAM init: requested 100 MHz refclk NOT FOUND\n");
					ddr_ref_hertz = CONFIG_REF_HERTZ;
					/* Clear the flag before trying again */
					set_ddr_clock_initialized(priv, 0, 0);
					goto try_again;
				} else {
					debug("N0: DRAM Init: requested 100 MHz refclk FOUND and SELECTED\n");
				}
			}
		}

		if (tmp_hertz > 0)
			calc_ddr_hertz = tmp_hertz;

		debug("LMC%d: measured speed: %u hz\n", if_idx, tmp_hertz);
	}
	if (measured_ddr_hertz)
		*measured_ddr_hertz = calc_ddr_hertz;

	memsize_mbytes = 0;
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		if (!(ddr_conf_valid_mask & (1 << if_idx)))
			continue;

		ret = init_octeon_dram_interface(priv, &ddr_conf[if_idx],
						 calc_ddr_hertz,
						 cpu_hertz, ddr_ref_hertz,
						 if_idx, ddr_conf_valid_mask);
		if (ret > 0)
			memsize_mbytes += ret;
	}

	if (memsize_mbytes == 0)
		/* All interfaces failed to initialize, so return error */
		return -1;

	/*
	 * Switch over to DBI mode only for chips that support it, and
	 * only when enabled via environment variable
	 */
	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
		eptr = env_get("ddr_dbi_switchover");
		if (eptr) {
			printf("DBI Switchover starting...\n");
			cvmx_dbi_switchover(priv);
			printf("DBI Switchover finished.\n");
		}
	}

	/* Call HW-assist tuning here on chips that support it */
	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X)))
		cvmx_maybe_tune_node(priv, calc_ddr_hertz);

	eptr = env_get("limit_dram_mbytes");
	if (eptr) {
		unsigned int mbytes = dectoul(eptr, NULL);

		if (mbytes > 0) {
			memsize_mbytes = mbytes;
			printf("Limiting DRAM size to %d MBytes based on limit_dram_mbytes env. variable\n",
			       mbytes);
		}
	}

	debug("LMC Initialization complete. Total DRAM %d MB\n",
	      memsize_mbytes);

	return memsize_mbytes;
}
static int octeon_ddr_probe(struct udevice *dev)
{
	struct ddr_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args l2c_node;
	struct ddr_conf *ddr_conf_ptr;
	u32 ddr_conf_valid_mask = 0;
	u32 measured_ddr_hertz = 0;
	int conf_table_count;
	int def_ddr_freq;
	u32 mem_mbytes = 0;
	u32 ddr_hertz;
	u32 ddr_ref_hertz;
	int alt_refclk;
	const char *eptr;
	fdt_addr_t addr;
	u64 *ptr;
	u64 val;
	int ret;
	int i;

	/* Don't try to re-init the DDR controller after relocation */
	if (gd->flags & GD_FLG_RELOC)
		return 0;

	/*
	 * Dummy read all local variables into cache, so that they are
	 * locked in cache when the DDR code runs with flushes etc enabled
	 */
	ptr = (u64 *)_end;
	for (i = 0; i < (0x100000 / sizeof(u64)); i++)
		val = readq(ptr++);

	/*
	 * The base addresses of LMC and L2C are read from the DT. This
	 * makes it possible to use the DDR init code without the need
	 * of the "node" variable, describing on which node to access. The
	 * node number is already included implicitly in the base addresses
	 * read from the DT this way.
	 */

	/* Get LMC base address */
	priv->lmc_base = dev_remap_addr(dev);
	debug("%s: lmc_base=%p\n", __func__, priv->lmc_base);

	/* Get L2C base address */
	ret = dev_read_phandle_with_args(dev, "l2c-handle", NULL, 0, 0,
					 &l2c_node);
	if (ret) {
		printf("Can't access L2C node!\n");
		return -ENODEV;
	}

	addr = ofnode_get_addr(l2c_node.node);
	if (addr == FDT_ADDR_T_NONE) {
		printf("Can't access L2C node!\n");
		return -ENODEV;
	}

	priv->l2c_base = map_physmem(addr, 0, MAP_NOCACHE);
	debug("%s: l2c_base=%p\n", __func__, priv->l2c_base);

	ddr_conf_ptr = octeon_ddr_conf_table_get(&conf_table_count,
						 &def_ddr_freq);
	if (!ddr_conf_ptr) {
		printf("ERROR: unable to determine DDR configuration\n");
		return -ENODEV;
	}
	for (i = 0; i < conf_table_count; i++) {
		if (ddr_conf_ptr[i].dimm_config_table[0].spd_addrs[0] ||
		    ddr_conf_ptr[i].dimm_config_table[0].spd_ptrs[0])
			ddr_conf_valid_mask |= 1 << i;
	}

	/*
	 * Check for special case of mismarked 3005 samples,
	 * and adjust cpuid
	 */
	alt_refclk = 0;
	ddr_hertz = def_ddr_freq * 1000000;

	eptr = env_get("ddr_clock_hertz");
	if (eptr) {
		ddr_hertz = simple_strtoul(eptr, NULL, 0);
		gd->mem_clk = divide_nint(ddr_hertz, 1000000);
		printf("Parameter found in environment. ddr_clock_hertz = %d\n",
		       ddr_hertz);
	}

	ddr_ref_hertz = octeon3_refclock(alt_refclk,
					 ddr_hertz,
					 &ddr_conf_ptr[0].dimm_config_table[0]);

	debug("Initializing DDR, clock = %uhz, reference = %uhz\n",
	      ddr_hertz, ddr_ref_hertz);

	mem_mbytes = octeon_ddr_initialize(priv, gd->cpu_clk,
					   ddr_hertz, ddr_ref_hertz,
					   ddr_conf_valid_mask,
					   ddr_conf_ptr, &measured_ddr_hertz);
	debug("Mem size in MBYTES: %u\n", mem_mbytes);

	gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);

	debug("Measured DDR clock %d Hz\n", measured_ddr_hertz);

	if (measured_ddr_hertz != 0) {
		if (!gd->mem_clk) {
			/*
			 * If ddr_clock not set, use measured clock
			 * and don't warn
			 */
			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
		} else if ((measured_ddr_hertz > ddr_hertz + 3000000) ||
			   (measured_ddr_hertz < ddr_hertz - 3000000)) {
			printf("\nWARNING:\n");
			printf("WARNING: Measured DDR clock mismatch! expected: %lld MHz, measured: %lldMHz, cpu clock: %lu MHz\n",
			       divide_nint(ddr_hertz, 1000000),
			       divide_nint(measured_ddr_hertz, 1000000),
			       gd->cpu_clk);
			printf("WARNING:\n\n");
			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
		}
	}

	if (!mem_mbytes)
		return -ENODEV;

	priv->info.base = CONFIG_SYS_SDRAM_BASE;
	priv->info.size = MB(mem_mbytes);

	/*
	 * For 6XXX generate a proper error when reading/writing
	 * non-existent memory locations.
	 */
	cvmx_l2c_set_big_size(priv, mem_mbytes, 0);

	debug("Ram size %uMiB\n", mem_mbytes);

	return 0;
}
static int octeon_get_info(struct udevice *dev, struct ram_info *info)
{
	struct ddr_priv *priv = dev_get_priv(dev);

	*info = priv->info;

	return 0;
}

static struct ram_ops octeon_ops = {
	.get_info = octeon_get_info,
};

static const struct udevice_id octeon_ids[] = {
	{ .compatible = "cavium,octeon-7xxx-ddr4" },
	{ }
};

U_BOOT_DRIVER(octeon_ddr) = {
	.name = "octeon_ddr",
	.id = UCLASS_RAM,
	.of_match = octeon_ids,
	.ops = &octeon_ops,
	.probe = octeon_ddr_probe,
	.plat_auto = sizeof(struct ddr_priv),
};
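/*
 * A minimal device-tree fragment this driver could bind against (node
 * name, unit address and reg values are illustrative only; the l2c-handle
 * phandle is what octeon_ddr_probe() uses to locate the L2C base address):
 *
 *	lmc: lmc@1180088000000 {
 *		compatible = "cavium,octeon-7xxx-ddr4";
 *		reg = <0x11800 0x88000000 0x0 0x02000000>;
 *		l2c-handle = <&l2c>;
 *	};
 */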