clk-cgu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL	64

#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)

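/*
 * Branches flagged with CLOCK_FLAG_VAL_INIT have their divider field
 * programmed with the default div_val at registration time.
 */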
static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{
	unsigned long flags;

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}

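/*
 * A mux flagged with MUX_CLK_SW has no backing hardware register; the
 * selected parent index is simply cached in mux->reg.
 */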
static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		val = mux->reg;
	else
		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
				      mux->width);
	spin_unlock_irqrestore(&mux->lock, flags);

	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	val = clk_mux_index_to_val(NULL, mux->flags, index);
	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		mux->reg = val;
	else
		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
				mux->width, val);
	spin_unlock_irqrestore(&mux->lock, flags);

	return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};

static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
		     const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->mux_flags;
	struct device *dev = ctx->dev;
	u8 shift = list->mux_shift;
	u8 width = list->mux_width;
	struct clk_init_data init = {};
	struct lgm_clk_mux *mux;
	u32 reg = list->mux_off;
	struct clk_hw *hw;
	int ret;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_mux_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	mux->membase = ctx->membase;
	mux->lock = ctx->lock;
	mux->reg = reg;
	mux->shift = shift;
	mux->width = width;
	mux->flags = cflags;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&mux->lock, flags);
		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
		spin_unlock_irqrestore(&mux->lock, flags);
	}

	return hw;
}

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&divider->lock, flags);
	val = lgm_get_clk_val(divider->membase, divider->reg,
			      divider->shift, divider->width);
	spin_unlock_irqrestore(&divider->lock, flags);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	int value;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(&divider->lock, flags);
	lgm_set_clk_val(divider->membase, divider->reg,
			divider->shift, divider->width, value);
	spin_unlock_irqrestore(&divider->lock, flags);

	return 0;
}

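/*
 * The divider register also carries a gate field (shift_gate/width_gate),
 * which the enable/disable ops below toggle.
 */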
static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
			div->width_gate, enable);
	spin_unlock_irqrestore(&div->lock, flags);
	return 0;
}

static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}

static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}

static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};

static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
			 const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->div_flags;
	struct device *dev = ctx->dev;
	struct lgm_clk_divider *div;
	struct clk_init_data init = {};
	u8 shift = list->div_shift;
	u8 width = list->div_width;
	u8 shift_gate = list->div_shift_gate;
	u8 width_gate = list->div_width_gate;
	u32 reg = list->div_off;
	struct clk_hw *hw;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_divider_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = 1;

	div->membase = ctx->membase;
	div->lock = ctx->lock;
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->shift_gate = shift_gate;
	div->width_gate = width_gate;
	div->flags = cflags;
	div->table = list->div_table;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&div->lock, flags);
		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
		spin_unlock_irqrestore(&div->lock, flags);
	}

	return hw;
}

static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list)
{
	unsigned long flags;
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
					  list->parent_data[0].name, list->flags,
					  list->mult, list->div);
	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return hw;
}

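/*
 * Gates are controlled through three consecutive words: writing 1 to the
 * EN word enables the clock, writing 1 to the DIS word disables it, and
 * the STAT word reports the current state.
 */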
static int lgm_clk_gate_enable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_EN(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_DIS(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg, ret;
	unsigned long flags;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_STAT(gate->reg);
	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return ret;
}

static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};

static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
		      const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->gate_flags;
	const char *pname = list->parent_data[0].name;
	struct device *dev = ctx->dev;
	u8 shift = list->gate_shift;
	struct clk_init_data init = {};
	struct lgm_clk_gate *gate;
	u32 reg = list->gate_off;
	struct clk_hw *hw;
	int ret;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_gate_ops;
	init.flags = list->flags;
	init.parent_names = pname ? &pname : NULL;
	init.num_parents = pname ? 1 : 0;

	gate->membase = ctx->membase;
	gate->lock = ctx->lock;
	gate->reg = reg;
	gate->shift = shift;
	gate->flags = cflags;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&gate->lock, flags);
		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
		spin_unlock_irqrestore(&gate->lock, flags);
	}

	return hw;
}

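/*
 * Register every clock described in @list. The resulting clk_hw pointers
 * are stored in ctx->clk_data.hws, indexed by the branch id.
 */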
int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk)
{
	struct clk_hw *hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		switch (list->type) {
		case CLK_TYPE_FIXED:
			hw = lgm_clk_register_fixed(ctx, list);
			break;
		case CLK_TYPE_MUX:
			hw = lgm_clk_register_mux(ctx, list);
			break;
		case CLK_TYPE_DIVIDER:
			hw = lgm_clk_register_divider(ctx, list);
			break;
		case CLK_TYPE_FIXED_FACTOR:
			hw = lgm_clk_register_fixed_factor(ctx, list);
			break;
		case CLK_TYPE_GATE:
			hw = lgm_clk_register_gate(ctx, list);
			break;
		default:
			dev_err(ctx->dev, "invalid clk type\n");
			return -EINVAL;
		}

		if (IS_ERR(hw)) {
			dev_err(ctx->dev,
				"register clk: %s, type: %u failed!\n",
				list->name, list->type);
			return -EIO;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}

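/*
 * The dual divider divides the parent rate by (div0 + 1) * (div1 + 1).
 * When the extra-divider bit is set, the result is further scaled by
 * mult/div (2/5), i.e. an additional divide by 2.5.
 */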
static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned int div0, div1, exdiv;
	u64 prate;

	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift0, ddiv->width0) + 1;
	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift1, ddiv->width1) + 1;
	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
				ddiv->shift2, ddiv->width2);
	prate = (u64)parent_rate;
	do_div(prate, div0);
	do_div(prate, div1);

	if (exdiv) {
		do_div(prate, ddiv->div);
		prate *= ddiv->mult;
	}

	return prate;
}

static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);
	return 0;
}

static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 0);
	spin_unlock_irqrestore(&ddiv->lock, flags);
}

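/*
 * Clamp the requested divider to MAX_DIVIDER_VAL, then split it into two
 * hardware fields such that ddiv1 * ddiv2 == div, with both factors no
 * larger than MAX_DDIV_REG.
 */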
static int
lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
	u32 idx, temp;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div > 1) {
		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
			temp = DIV_ROUND_UP_ULL((u64)div, idx);
			if (div % idx == 0 && temp <= MAX_DDIV_REG)
				break;
		}

		if (idx > MAX_DDIV_REG)
			return -EINVAL;

		*ddiv1 = temp;
		*ddiv2 = idx;
	}

	return 0;
}

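/*
 * When the 2.5 predivider bit is set, the requested divider is scaled by
 * 2/5 before being split and programmed, mirroring the extra factor
 * applied in lgm_clk_ddiv_recalc_rate().
 */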
static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
		      unsigned long prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;

	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
		div = div * 2;
	}

	if (div <= 0) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
			ddiv1 - 1);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
			ddiv2 - 1);

	spin_unlock_irqrestore(&ddiv->lock, flags);

	return 0;
}

static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;
	u64 rate64;

	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

	/* if predivide bit is enabled, modify div by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = div * 2;
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	if (div <= 0)
		return *prate;

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
			return -EINVAL;

	rate64 = *prate;
	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		rate64 = rate64 * 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return rate64;
}

static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable = lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};

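/*
 * Register the dual-divider clocks described in @list and store the
 * resulting clk_hw pointers in ctx->clk_data.hws, indexed by id.
 */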
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk)
{
	struct device *dev = ctx->dev;
	struct clk_hw *hw;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		struct clk_init_data init = {};
		struct lgm_clk_ddiv *ddiv;

		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
		if (!ddiv)
			return -ENOMEM;

		init.name = list->name;
		init.ops = &lgm_clk_ddiv_ops;
		init.flags = list->flags;
		init.parent_data = list->parent_data;
		init.num_parents = 1;

		ddiv->membase = ctx->membase;
		ddiv->lock = ctx->lock;
		ddiv->reg = list->reg;
		ddiv->shift0 = list->shift0;
		ddiv->width0 = list->width0;
		ddiv->shift1 = list->shift1;
		ddiv->width1 = list->width1;
		ddiv->shift_gate = list->shift_gate;
		ddiv->width_gate = list->width_gate;
		ddiv->shift2 = list->ex_shift;
		ddiv->width2 = list->ex_width;
		ddiv->flags = list->div_flags;
		ddiv->mult = 2;
		ddiv->div = 5;
		ddiv->hw.init = &init;

		hw = &ddiv->hw;
		ret = devm_clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "register clk: %s failed!\n", list->name);
			return ret;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}