ipu-image-convert.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"

/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame:
 *
 *                  +---------+-----+
 * +-----+---+      |  A      |  B  |
 * |  A  | B |      |         |     |
 * +-----+---+  --> +---------+-----+
 * |  C  | D |      |  C      |  D  |
 * +-----+---+      |         |     |
 *                  +---------+-----+
 *
 * Clockwise 90° rotations are handled by first rescaling into a
 * reusable temporary tile buffer and then rotating with the 8x8
 * block rotator, writing to the correct destination:
 *
 *                                     +-----+-----+
 *                                     |     |     |
 * +-----+---+      +---------+        |  C  |  A  |
 * |  A  | B |      | A,B,    |        |     |     |
 * +-----+---+ -->  | C,D     |  -->   |     |     |
 * |  C  | D |      +---------+        +-----+-----+
 * +-----+---+                         |  D  |  B  |
 *                                     |     |     |
 *                                     +-----+-----+
 *
 * If the 8x8 block rotator is used, horizontal or vertical flipping
 * is done during the rotation step, otherwise flipping is done
 * during the scaling step.
 * With rotation or flipping, tile order changes between input and
 * output image. Tiles are numbered row major from top left to bottom
 * right for both input and output image.
 */

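/*
 * For example, a 1920x1080 output exceeds 1024 in both dimensions and is
 * therefore split into 2 vertical and 2 horizontal stripes, i.e. a 2x2
 * grid of 4 tiles of nominally 960x540 pixels each, which fits within
 * the resizer limit.
 */
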
#define MAX_STRIPES_W	4
#define MAX_STRIPES_H	4
#define MAX_TILES	(MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W	16
#define MIN_H	8
#define MAX_W	4096
#define MAX_H	4096

enum ipu_image_convert_type {
	IMAGE_CONVERT_IN = 0,
	IMAGE_CONVERT_OUT,
};

struct ipu_image_convert_dma_buf {
	void		*virt;
	dma_addr_t	phys;
	unsigned long	len;
};

struct ipu_image_convert_dma_chan {
	int in;
	int out;
	int rot_in;
	int rot_out;
	int vdi_in_p;
	int vdi_in;
	int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
	u32 width;
	u32 height;
	u32 left;
	u32 top;
	/* size and strides are in bytes */
	u32 size;
	u32 stride;
	u32 rot_stride;
	/* start Y or packed offset of this tile */
	u32 offset;
	/* offset from start to tile in U plane, for planar formats */
	u32 u_off;
	/* offset from start to tile in V plane, for planar formats */
	u32 v_off;
};

struct ipu_image_convert_image {
	struct ipu_image base;
	enum ipu_image_convert_type type;

	const struct ipu_image_pixfmt *fmt;
	unsigned int stride;

	/* # of rows (horizontal stripes) if dest height is > 1024 */
	unsigned int num_rows;
	/* # of columns (vertical stripes) if dest width is > 1024 */
	unsigned int num_cols;

	struct ipu_image_tile tile[MAX_TILES];
};

struct ipu_image_pixfmt {
	u32 fourcc;        /* V4L2 fourcc */
	int bpp;           /* total bpp */
	int uv_width_dec;  /* decimation in width for U/V planes */
	int uv_height_dec; /* decimation in height for U/V planes */
	bool planar;       /* planar format */
	bool uv_swapped;   /* U and V planes are swapped */
	bool uv_packed;    /* partial planar (U and V in same plane) */
};

struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

enum eof_irq_mask {
	EOF_IRQ_IN      = BIT(0),
	EOF_IRQ_ROT_IN  = BIT(1),
	EOF_IRQ_OUT     = BIT(2),
	EOF_IRQ_ROT_OUT = BIT(3),
};

#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
#define EOF_IRQ_ROT_COMPLETE	(EOF_IRQ_IN | EOF_IRQ_OUT |	\
				 EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)

struct ipu_image_convert_ctx {
	struct ipu_image_convert_chan *chan;

	ipu_image_convert_cb_t complete;
	void *complete_context;

	/* Source/destination image data and rotation mode */
	struct ipu_image_convert_image in;
	struct ipu_image_convert_image out;
	struct ipu_ic_csc csc;
	enum ipu_rotate_mode rot_mode;
	u32 downsize_coeff_h;
	u32 downsize_coeff_v;
	u32 image_resize_coeff_h;
	u32 image_resize_coeff_v;
	u32 resize_coeffs_h[MAX_STRIPES_W];
	u32 resize_coeffs_v[MAX_STRIPES_H];

	/* intermediate buffer for rotation */
	struct ipu_image_convert_dma_buf rot_intermediate[2];

	/* current buffer number for double buffering */
	int cur_buf_num;

	bool aborting;
	struct completion aborted;

	/* can we use double-buffering for this conversion operation? */
	bool double_buffering;
	/* num_rows * num_cols */
	unsigned int num_tiles;
	/* next tile to process */
	unsigned int next_tile;
	/* where to place converted tile in dest image */
	unsigned int out_tile_map[MAX_TILES];
	/* mask of completed EOF irqs at every tile conversion */
	enum eof_irq_mask eof_mask;

	struct list_head list;
};

struct ipu_image_convert_chan {
	struct ipu_image_convert_priv *priv;

	enum ipu_ic_task ic_task;
	const struct ipu_image_convert_dma_chan *dma_ch;

	struct ipu_ic *ic;
	struct ipuv3_channel *in_chan;
	struct ipuv3_channel *out_chan;
	struct ipuv3_channel *rotation_in_chan;
	struct ipuv3_channel *rotation_out_chan;

	/* the IPU end-of-frame irqs */
	int in_eof_irq;
	int rot_in_eof_irq;
	int out_eof_irq;
	int rot_out_eof_irq;

	spinlock_t irqlock;

	/* list of convert contexts */
	struct list_head ctx_list;
	/* queue of conversion runs */
	struct list_head pending_q;
	/* queue of completed runs */
	struct list_head done_q;

	/* the current conversion run */
	struct ipu_image_convert_run *current_run;
};

struct ipu_image_convert_priv {
	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
	struct ipu_soc *ipu;
};

static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
	[IC_TASK_VIEWFINDER] = {
		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
	},
	[IC_TASK_POST_PROCESSOR] = {
		.in = IPUV3_CHANNEL_MEM_IC_PP,
		.out = IPUV3_CHANNEL_IC_PP_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
	},
};

static const struct ipu_image_pixfmt image_convert_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.bpp    = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB24,
		.bpp    = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR24,
		.bpp    = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_XRGB32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_XBGR32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_BGRX32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_RGBX32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YVU420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_swapped = true,
	}, {
		.fourcc = V4L2_PIX_FMT_NV12,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_packed = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV422P,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_NV16,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
		.uv_packed = true,
	},
};

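/* Look up the pixel format descriptor for a V4L2 fourcc, NULL if unsupported */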
static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
	const struct ipu_image_pixfmt *ret = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
		if (image_convert_formats[i].fourcc == fourcc) {
			ret = &image_convert_formats[i];
			break;
		}
	}

	return ret;
}

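/* Log an image's frame size, tile grid and fourcc for debugging */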
static void dump_format(struct ipu_image_convert_ctx *ctx,
			struct ipu_image_convert_image *ic_image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev,
		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
		chan->ic_task, ctx,
		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
		ic_image->base.pix.width, ic_image->base.pix.height,
		ic_image->num_cols, ic_image->num_rows,
		ic_image->fmt->fourcc & 0xff,
		(ic_image->fmt->fourcc >> 8) & 0xff,
		(ic_image->fmt->fourcc >> 16) & 0xff,
		(ic_image->fmt->fourcc >> 24) & 0xff);
}

int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
	const struct ipu_image_pixfmt *fmt;

	if (index >= (int)ARRAY_SIZE(image_convert_formats))
		return -EINVAL;

	/* Format found */
	fmt = &image_convert_formats[index];
	*fourcc = fmt->fourcc;
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);

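/*
 * Coherent DMA buffer helpers, used for the intermediate tile buffers
 * that the scaling step writes into before the 8x8 block rotator pass.
 */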
static void free_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf)
{
	if (buf->virt)
		dma_free_coherent(priv->ipu->dev,
				  buf->len, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
}

static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf,
			 int size)
{
	buf->len = PAGE_ALIGN(size);
	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
				       GFP_DMA | GFP_KERNEL);
	if (!buf->virt) {
		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
		return -ENOMEM;
	}

	return 0;
}

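/*
 * Number of 1024-pixel stripes needed to cover a dimension:
 * num_stripes(1024) == 1, num_stripes(1025) == 2, num_stripes(4096) == 4.
 */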
static inline int num_stripes(int dim)
{
	return (dim - 1) / 1024 + 1;
}

/*
 * Calculate downsizing coefficients, which are the same for all tiles,
 * and initial bilinear resizing coefficients, which are used to find the
 * best seam positions.
 * Also determine the number of tiles necessary to guarantee that no tile
 * is larger than 1024 pixels in either dimension at the output and between
 * IC downsizing and main processing sections.
 */
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
					  struct ipu_image *in,
					  struct ipu_image *out)
{
	u32 downsized_width = in->rect.width;
	u32 downsized_height = in->rect.height;
	u32 downsize_coeff_v = 0;
	u32 downsize_coeff_h = 0;
	u32 resized_width = out->rect.width;
	u32 resized_height = out->rect.height;
	u32 resize_coeff_h;
	u32 resize_coeff_v;
	u32 cols;
	u32 rows;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		resized_width = out->rect.height;
		resized_height = out->rect.width;
	}

	/* Do not let invalid input lead to an endless loop below */
	if (WARN_ON(resized_width == 0 || resized_height == 0))
		return -EINVAL;

	while (downsized_width >= resized_width * 2) {
		downsized_width >>= 1;
		downsize_coeff_h++;
	}

	while (downsized_height >= resized_height * 2) {
		downsized_height >>= 1;
		downsize_coeff_v++;
	}

	/*
	 * Calculate the bilinear resizing coefficients that could be used if
	 * we were converting with a single tile. The bottom right output pixel
	 * should sample as close as possible to the bottom right input pixel
	 * out of the decimator, but not overshoot it:
	 */
	resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
	resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);

	/*
	 * Both the output of the IC downsizing section before being passed to
	 * the IC main processing section and the final output of the IC main
	 * processing section must be <= 1024 pixels in both dimensions.
	 */
	cols = num_stripes(max_t(u32, downsized_width, resized_width));
	rows = num_stripes(max_t(u32, downsized_height, resized_height));

	dev_dbg(ctx->chan->priv->ipu->dev,
		"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
		__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
		resize_coeff_v, cols, rows);

	if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
	    resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
		return -EINVAL;

	ctx->downsize_coeff_h = downsize_coeff_h;
	ctx->downsize_coeff_v = downsize_coeff_v;
	ctx->image_resize_coeff_h = resize_coeff_h;
	ctx->image_resize_coeff_v = resize_coeff_v;
	ctx->in.num_cols = cols;
	ctx->in.num_rows = rows;

	return 0;
}

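/* Round x to the nearest multiple of y (halfway cases round up) */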
#define round_closest(x, y) round_down((x) + (y)/2, (y))

/*
 * Find the best aligned seam position for the given column / row index.
 * Rotation and image offsets are out of scope.
 *
 * @index: column / row index, used to calculate valid interval
 * @in_edge: input right / bottom edge
 * @out_edge: output right / bottom edge
 * @in_align: input alignment, either horizontal 8-byte line start address
 *            alignment, or pixel alignment due to image format
 * @out_align: output alignment, either horizontal 8-byte line start address
 *             alignment, or pixel alignment due to image format or rotator
 *             block size
 * @in_burst: horizontal input burst size in case of horizontal flip
 * @out_burst: horizontal output burst size or rotator block size
 * @downsize_coeff: downsizing section coefficient
 * @resize_coeff: main processing section resizing coefficient
 * @_in_seam: aligned input seam position return value
 * @_out_seam: aligned output seam position return value
 */
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
			   unsigned int index,
			   unsigned int in_edge,
			   unsigned int out_edge,
			   unsigned int in_align,
			   unsigned int out_align,
			   unsigned int in_burst,
			   unsigned int out_burst,
			   unsigned int downsize_coeff,
			   unsigned int resize_coeff,
			   u32 *_in_seam,
			   u32 *_out_seam)
{
	struct device *dev = ctx->chan->priv->ipu->dev;
	unsigned int out_pos;
	/* Input / output seam position candidates */
	unsigned int out_seam = 0;
	unsigned int in_seam = 0;
	unsigned int min_diff = UINT_MAX;
	unsigned int out_start;
	unsigned int out_end;
	unsigned int in_start;
	unsigned int in_end;

	/* Start within 1024 pixels of the right / bottom edge */
	out_start = max_t(int, index * out_align, out_edge - 1024);
	/* End before having to add more columns to the left / rows above */
	out_end = min_t(unsigned int, out_edge, index * 1024 + 1);

	/*
	 * Limit input seam position to make sure that the downsized input tile
	 * to the right or bottom does not exceed 1024 pixels.
	 */
	in_start = max_t(int, index * in_align,
			 in_edge - (1024 << downsize_coeff));
	in_end = min_t(unsigned int, in_edge,
		       index * (1024 << downsize_coeff) + 1);

	/*
	 * Output tiles must start at a multiple of 8 bytes horizontally and
	 * possibly at an even line horizontally depending on the pixel format.
	 * Only consider output aligned positions for the seam.
	 */
	out_start = round_up(out_start, out_align);
	for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
		unsigned int in_pos;
		unsigned int in_pos_aligned;
		unsigned int in_pos_rounded;
		unsigned int abs_diff;

		/*
		 * Tiles in the right row / bottom column may not be allowed to
		 * overshoot horizontally / vertically. out_burst may be the
		 * actual DMA burst size, or the rotator block size.
		 */
		if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
			continue;

		/*
		 * Input sample position, corresponding to out_pos, 19.13 fixed
		 * point.
		 */
		in_pos = (out_pos * resize_coeff) << downsize_coeff;
		/*
		 * The closest input sample position that we could actually
		 * start the input tile at, 19.13 fixed point.
		 */
		in_pos_aligned = round_closest(in_pos, 8192U * in_align);
		/* Convert 19.13 fixed point to integer */
		in_pos_rounded = in_pos_aligned / 8192U;

		if (in_pos_rounded < in_start)
			continue;
		if (in_pos_rounded >= in_end)
			break;

		if ((in_burst > 1) &&
		    (in_edge - in_pos_rounded) % in_burst)
			continue;

		if (in_pos < in_pos_aligned)
			abs_diff = in_pos_aligned - in_pos;
		else
			abs_diff = in_pos - in_pos_aligned;

		if (abs_diff < min_diff) {
			in_seam = in_pos_rounded;
			out_seam = out_pos;
			min_diff = abs_diff;
		}
	}

	*_out_seam = out_seam;
	*_in_seam = in_seam;

	dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
		__func__, out_seam, out_align, out_start, out_end,
		in_seam, in_align, in_start, in_end, min_diff / 8192,
		DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
}

/*
 * Tile left edges are required to be aligned to multiples of 8 bytes
 * by the IDMAC.
 */
static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
{
	if (fmt->planar)
		return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
	else
		return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
}

/*
 * Tile top edge alignment is only limited by chroma subsampling.
 */
static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
{
	return fmt->uv_height_dec > 1 ? 2 : 1;
}

static inline u32 tile_width_align(enum ipu_image_convert_type type,
				   const struct ipu_image_pixfmt *fmt,
				   enum ipu_rotate_mode rot_mode)
{
	if (type == IMAGE_CONVERT_IN) {
		/*
		 * The IC burst reads 8 pixels at a time. Reading beyond the
		 * end of the line is usually acceptable. Those pixels are
		 * ignored, unless the IC has to write the scaled line in
		 * reverse.
		 */
		return (!ipu_rot_mode_is_irt(rot_mode) &&
			(rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
	}

	/*
	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
	 * formats to guarantee 8-byte aligned line start addresses in the
	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
	 * for all other formats.
	 */
	return (ipu_rot_mode_is_irt(rot_mode) &&
		fmt->planar && !fmt->uv_packed) ?
		8 * fmt->uv_width_dec : 8;
}

static inline u32 tile_height_align(enum ipu_image_convert_type type,
				    const struct ipu_image_pixfmt *fmt,
				    enum ipu_rotate_mode rot_mode)
{
	if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
		return 2;

	/*
	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
	 * formats to guarantee 8-byte aligned line start addresses in the
	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
	 * for all other formats.
	 */
	return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
}

/*
 * Fill in left position and width for all tiles in an input column, and
 * for all corresponding output tiles. If the 90° rotator is used, the output
 * tiles are in a row, and output tile top position and height are set.
 */
static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
			     unsigned int col,
			     struct ipu_image_convert_image *in,
			     unsigned int in_left, unsigned int in_width,
			     struct ipu_image_convert_image *out,
			     unsigned int out_left, unsigned int out_width)
{
	unsigned int row, tile_idx;
	struct ipu_image_tile *in_tile, *out_tile;

	for (row = 0; row < in->num_rows; row++) {
		tile_idx = in->num_cols * row + col;
		in_tile = &in->tile[tile_idx];
		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

		in_tile->left = in_left;
		in_tile->width = in_width;

		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
			out_tile->top = out_left;
			out_tile->height = out_width;
		} else {
			out_tile->left = out_left;
			out_tile->width = out_width;
		}
	}
}

/*
 * Fill in top position and height for all tiles in an input row, and
 * for all corresponding output tiles. If the 90° rotator is used, the output
 * tiles are in a column, and output tile left position and width are set.
 */
static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
			  struct ipu_image_convert_image *in,
			  unsigned int in_top, unsigned int in_height,
			  struct ipu_image_convert_image *out,
			  unsigned int out_top, unsigned int out_height)
{
	unsigned int col, tile_idx;
	struct ipu_image_tile *in_tile, *out_tile;

	for (col = 0; col < in->num_cols; col++) {
		tile_idx = in->num_cols * row + col;
		in_tile = &in->tile[tile_idx];
		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

		in_tile->top = in_top;
		in_tile->height = in_height;

		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
			out_tile->left = out_top;
			out_tile->width = out_height;
		} else {
			out_tile->top = out_top;
			out_tile->height = out_height;
		}
	}
}

/*
 * Find the best horizontal and vertical seam positions to split into tiles.
 * Minimize the fractional part of the input sampling position for the
 * top / left pixels of each tile.
 */
static void find_seams(struct ipu_image_convert_ctx *ctx,
		       struct ipu_image_convert_image *in,
		       struct ipu_image_convert_image *out)
{
	struct device *dev = ctx->chan->priv->ipu->dev;
	unsigned int resized_width = out->base.rect.width;
	unsigned int resized_height = out->base.rect.height;
	unsigned int col;
	unsigned int row;
	unsigned int in_left_align = tile_left_align(in->fmt);
	unsigned int in_top_align = tile_top_align(in->fmt);
	unsigned int out_left_align = tile_left_align(out->fmt);
	unsigned int out_top_align = tile_top_align(out->fmt);
	unsigned int out_width_align = tile_width_align(out->type, out->fmt,
							ctx->rot_mode);
	unsigned int out_height_align = tile_height_align(out->type, out->fmt,
							   ctx->rot_mode);
	unsigned int in_right = in->base.rect.width;
	unsigned int in_bottom = in->base.rect.height;
	unsigned int out_right = out->base.rect.width;
	unsigned int out_bottom = out->base.rect.height;
	unsigned int flipped_out_left;
	unsigned int flipped_out_top;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* Switch width/height and align top left to IRT block size */
		resized_width = out->base.rect.height;
		resized_height = out->base.rect.width;
		out_left_align = out_height_align;
		out_top_align = out_width_align;
		out_width_align = out_left_align;
		out_height_align = out_top_align;
		out_right = out->base.rect.height;
		out_bottom = out->base.rect.width;
	}

	for (col = in->num_cols - 1; col > 0; col--) {
		bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
					  !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		bool allow_out_overshoot = (col < in->num_cols - 1) &&
					   !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		unsigned int in_left;
		unsigned int out_left;

		/*
		 * Align input width to burst length if the scaling step flips
		 * horizontally.
		 */
		find_best_seam(ctx, col,
			       in_right, out_right,
			       in_left_align, out_left_align,
			       allow_in_overshoot ? 1 : 8 /* burst length */,
			       allow_out_overshoot ? 1 : out_width_align,
			       ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
			       &in_left, &out_left);

		if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
			flipped_out_left = resized_width - out_right;
		else
			flipped_out_left = out_left;

		fill_tile_column(ctx, col, in, in_left, in_right - in_left,
				 out, flipped_out_left, out_right - out_left);

		dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
			in_left, in_right - in_left,
			flipped_out_left, out_right - out_left);

		in_right = in_left;
		out_right = out_left;
	}

	flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
			   resized_width - out_right : 0;

	fill_tile_column(ctx, 0, in, 0, in_right,
			 out, flipped_out_left, out_right);

	dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
		in_right, flipped_out_left, out_right);

	for (row = in->num_rows - 1; row > 0; row--) {
		bool allow_overshoot = row < in->num_rows - 1;
		unsigned int in_top;
		unsigned int out_top;

		find_best_seam(ctx, row,
			       in_bottom, out_bottom,
			       in_top_align, out_top_align,
			       1, allow_overshoot ? 1 : out_height_align,
			       ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
			       &in_top, &out_top);

		if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
		    ipu_rot_mode_is_irt(ctx->rot_mode))
			flipped_out_top = resized_height - out_bottom;
		else
			flipped_out_top = out_top;

		fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
			      out, flipped_out_top, out_bottom - out_top);

		dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
			in_top, in_bottom - in_top,
			flipped_out_top, out_bottom - out_top);

		in_bottom = in_top;
		out_bottom = out_top;
	}

	if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
	    ipu_rot_mode_is_irt(ctx->rot_mode))
		flipped_out_top = resized_height - out_bottom;
	else
		flipped_out_top = 0;

	fill_tile_row(ctx, 0, in, 0, in_bottom,
		      out, flipped_out_top, out_bottom);

	dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
		in_bottom, flipped_out_top, out_bottom);
}

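/*
 * Compute per-tile size and strides, and verify that no tile exceeds the
 * 1024x1024 IC limit (scaled up by the downsizing coefficients for input
 * tiles).
 */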
static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
				struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned int max_width = 1024;
	unsigned int max_height = 1024;
	unsigned int i;

	if (image->type == IMAGE_CONVERT_IN) {
		/* Up to 4096x4096 input tile size */
		max_width <<= ctx->downsize_coeff_h;
		max_height <<= ctx->downsize_coeff_v;
	}

	for (i = 0; i < ctx->num_tiles; i++) {
		struct ipu_image_tile *tile;
		const unsigned int row = i / image->num_cols;
		const unsigned int col = i % image->num_cols;

		if (image->type == IMAGE_CONVERT_OUT)
			tile = &image->tile[ctx->out_tile_map[i]];
		else
			tile = &image->tile[i];

		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
			tile->width;

		if (image->fmt->planar) {
			tile->stride = tile->width;
			tile->rot_stride = tile->height;
		} else {
			tile->stride =
				(image->fmt->bpp * tile->width) >> 3;
			tile->rot_stride =
				(image->fmt->bpp * tile->height) >> 3;
		}

		dev_dbg(priv->ipu->dev,
			"task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
			chan->ic_task, ctx,
			image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
			row, col,
			tile->width, tile->height, tile->left, tile->top);

		if (!tile->width || tile->width > max_width ||
		    !tile->height || tile->height > max_height) {
			dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
				image->type == IMAGE_CONVERT_IN ? "input" :
				"output", tile->width, tile->height);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
				int src_row, int src_col)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	int dst_row, dst_col;

	/* with no rotation it's a 1:1 mapping */
	if (ctx->rot_mode == IPU_ROTATE_NONE)
		return src_row * s_image->num_cols + src_col;

	/*
	 * before doing the transform, first we have to translate
	 * source row,col for an origin in the center of s_image
	 */
	src_row = src_row * 2 - (s_image->num_rows - 1);
	src_col = src_col * 2 - (s_image->num_cols - 1);

	/* do the rotation transform */
	if (ctx->rot_mode & IPU_ROT_BIT_90) {
		dst_col = -src_row;
		dst_row = src_col;
	} else {
		dst_col = src_col;
		dst_row = src_row;
	}

	/* apply flip */
	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
		dst_col = -dst_col;
	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
		dst_row = -dst_row;

	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

	/*
	 * finally translate dest row,col using an origin in upper
	 * left of d_image
	 */
	dst_row += d_image->num_rows - 1;
	dst_col += d_image->num_cols - 1;
	dst_row /= 2;
	dst_col /= 2;

	return dst_row * d_image->num_cols + dst_col;
}

/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_image *s_image = &ctx->in;
	unsigned int row, col, tile = 0;

	for (row = 0; row < s_image->num_rows; row++) {
		for (col = 0; col < s_image->num_cols; col++) {
			ctx->out_tile_map[tile] =
				transform_tile_index(ctx, row, col);
			tile++;
		}
	}
}

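/*
 * Compute tile start offsets for planar formats. The u_off and v_off
 * values are relative to the tile's own start offset (y_off), which is
 * why y_size - y_off is folded into them below. All offsets must be
 * 8-byte aligned for the IDMAC.
 */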
static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 H, top, y_stride, uv_stride;
	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
	u32 y_row_off, y_col_off, y_off;
	u32 y_size, uv_size;

	/* setup some convenience vars */
	H = image->base.pix.height;

	y_stride = image->stride;
	uv_stride = y_stride / fmt->uv_width_dec;
	if (fmt->uv_packed)
		uv_stride *= 2;

	y_size = H * y_stride;
	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

	for (row = 0; row < image->num_rows; row++) {
		top = image->tile[tile].top;
		y_row_off = top * y_stride;
		uv_row_off = (top * uv_stride) / fmt->uv_height_dec;

		for (col = 0; col < image->num_cols; col++) {
			y_col_off = image->tile[tile].left;
			uv_col_off = y_col_off / fmt->uv_width_dec;
			if (fmt->uv_packed)
				uv_col_off *= 2;

			y_off = y_row_off + y_col_off;
			uv_off = uv_row_off + uv_col_off;

			u_off = y_size - y_off + uv_off;
			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
			if (fmt->uv_swapped) {
				tmp = u_off;
				u_off = v_off;
				v_off = tmp;
			}

			image->tile[tile].offset = y_off;
			image->tile[tile].u_off = u_off;
			image->tile[tile++].v_off = v_off;

			if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"y_off %08x, u_off %08x, v_off %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					y_off, u_off, v_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

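/*
 * Compute tile start offsets for packed formats: a single byte offset per
 * tile, which must be 8-byte aligned for the IDMAC.
 */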
static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 bpp, stride, offset;
	u32 row_off, col_off;

	/* setup some convenience vars */
	stride = image->stride;
	bpp = fmt->bpp;

	for (row = 0; row < image->num_rows; row++) {
		row_off = image->tile[tile].top * stride;

		for (col = 0; col < image->num_cols; col++) {
			col_off = (image->tile[tile].left * bpp) >> 3;

			offset = row_off + col_off;

			image->tile[tile].offset = offset;
			image->tile[tile].u_off = 0;
			image->tile[tile++].v_off = 0;

			if (offset & 0x7) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"phys %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					row_off + col_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
			     struct ipu_image_convert_image *image)
{
	if (image->fmt->planar)
		return calc_tile_offsets_planar(ctx, image);

	return calc_tile_offsets_packed(ctx, image);
}

/*
 * Calculate the resizing ratio for the IC main processing section given input
 * size, fixed downsizing coefficient, and output size.
 * Either round to closest for the next tile's first pixel to minimize seams
 * and distortion (for all but right column / bottom row), or round down to
 * avoid sampling beyond the edges of the input image for this tile's last
 * pixel.
 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
 */
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
			     u32 output_size, bool allow_overshoot)
{
	u32 downsized = input_size >> downsize_coeff;

	if (allow_overshoot)
		return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
	else
		return 8192 * (downsized - 1) / (output_size - 1);
}

/*
 * Slightly modify resize coefficients per tile to hide the bilinear
 * interpolator reset at tile borders, shifting the right / bottom edge
 * by up to a half input pixel. This removes noticeable seams between
 * tiles at higher upscaling factors.
 */
static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_tile *in_tile, *out_tile;
	unsigned int col, row, tile_idx;
	unsigned int last_output;

	for (col = 0; col < ctx->in.num_cols; col++) {
		bool closest = (col < ctx->in.num_cols - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		u32 resized_width;
		u32 resize_coeff_h;
		u32 in_width;

		tile_idx = col;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_width = out_tile->height;
		else
			resized_width = out_tile->width;

		resize_coeff_h = calc_resize_coeff(in_tile->width,
						   ctx->downsize_coeff_h,
						   resized_width, closest);

		dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
			__func__, col, resize_coeff_h);

		/*
		 * With the horizontal scaling factor known, round up resized
		 * width (output width or height) to burst size.
		 */
		resized_width = round_up(resized_width, 8);

		/*
		 * Calculate input width from the last accessed input pixel
		 * given resized width and scaling coefficients. Round up to
		 * burst size.
		 */
		last_output = resized_width - 1;
		if (closest && ((last_output * resize_coeff_h) % 8192))
			last_output++;
		in_width = round_up(
			(DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
			<< ctx->downsize_coeff_h, 8);

		for (row = 0; row < ctx->in.num_rows; row++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->height = resized_width;
			else
				out_tile->width = resized_width;

			in_tile->width = in_width;
		}

		ctx->resize_coeffs_h[col] = resize_coeff_h;
	}

	for (row = 0; row < ctx->in.num_rows; row++) {
		bool closest = (row < ctx->in.num_rows - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
		u32 resized_height;
		u32 resize_coeff_v;
		u32 in_height;

		tile_idx = row * ctx->in.num_cols;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_height = out_tile->width;
		else
			resized_height = out_tile->height;

		resize_coeff_v = calc_resize_coeff(in_tile->height,
						   ctx->downsize_coeff_v,
						   resized_height, closest);

		dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
			__func__, row, resize_coeff_v);

		/*
		 * With the vertical scaling factor known, round up resized
		 * height (output width or height) to IDMAC limitations.
		 */
		resized_height = round_up(resized_height, 2);

		/*
		 * Calculate input height from the last accessed input pixel
		 * given resized height and scaling coefficients. Align to
		 * IDMAC restrictions.
		 */
		last_output = resized_height - 1;
		if (closest && ((last_output * resize_coeff_v) % 8192))
			last_output++;
		in_height = round_up(
			(DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
			<< ctx->downsize_coeff_v, 2);

		for (col = 0; col < ctx->in.num_cols; col++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->width = resized_height;
			else
				out_tile->height = resized_height;

			in_tile->height = in_height;
		}

		ctx->resize_coeffs_v[row] = resize_coeff_v;
	}
}

/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
			 struct list_head *q)
{
	struct ipu_image_convert_run *run;
	int count = 0;

	lockdep_assert_held(&ctx->chan->irqlock);

	list_for_each_entry(run, q, list) {
		if (run->ctx == ctx)
			count++;
	}

	return count;
}

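/* Stop the IC task and disable (and unlink, for rotation) the IDMAC channels */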
static void convert_stop(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	/* disable IC tasks and the channels */
	ipu_ic_task_disable(chan->ic);
	ipu_idmac_disable_channel(chan->in_chan);
	ipu_idmac_disable_channel(chan->out_chan);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_disable_channel(chan->rotation_in_chan);
		ipu_idmac_disable_channel(chan->rotation_out_chan);
		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
	}

	ipu_ic_disable(chan->ic);
}

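/*
 * Program an IDMAC channel's parameter memory for one tile: resolution,
 * stride, buffer addresses (double-buffered when possible), rotation,
 * block mode and burst size.
 */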
static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
			       struct ipuv3_channel *channel,
			       struct ipu_image_convert_image *image,
			       enum ipu_rotate_mode rot_mode,
			       bool rot_swap_width_height,
			       unsigned int tile)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	unsigned int burst_size;
	u32 width, height, stride;
	dma_addr_t addr0, addr1 = 0;
	struct ipu_image tile_image;
	unsigned int tile_idx[2];

	if (image->type == IMAGE_CONVERT_OUT) {
		tile_idx[0] = ctx->out_tile_map[tile];
		tile_idx[1] = ctx->out_tile_map[1];
	} else {
		tile_idx[0] = tile;
		tile_idx[1] = 1;
	}

	if (rot_swap_width_height) {
		width = image->tile[tile_idx[0]].height;
		height = image->tile[tile_idx[0]].width;
		stride = image->tile[tile_idx[0]].rot_stride;
		addr0 = ctx->rot_intermediate[0].phys;
		if (ctx->double_buffering)
			addr1 = ctx->rot_intermediate[1].phys;
	} else {
		width = image->tile[tile_idx[0]].width;
		height = image->tile[tile_idx[0]].height;
		stride = image->stride;
		addr0 = image->base.phys0 +
			image->tile[tile_idx[0]].offset;
		if (ctx->double_buffering)
			addr1 = image->base.phys0 +
				image->tile[tile_idx[1]].offset;
	}

	ipu_cpmem_zero(channel);

	memset(&tile_image, 0, sizeof(tile_image));
	tile_image.pix.width = tile_image.rect.width = width;
	tile_image.pix.height = tile_image.rect.height = height;
	tile_image.pix.bytesperline = stride;
	tile_image.pix.pixelformat = image->fmt->fourcc;
	tile_image.phys0 = addr0;
	tile_image.phys1 = addr1;
	if (image->fmt->planar && !rot_swap_width_height) {
		tile_image.u_offset = image->tile[tile_idx[0]].u_off;
		tile_image.v_offset = image->tile[tile_idx[0]].v_off;
	}

	ipu_cpmem_set_image(channel, &tile_image);

	if (rot_mode)
		ipu_cpmem_set_rotation(channel, rot_mode);

	/*
	 * Skip writing U and V components to odd rows in the output
	 * channels for planar 4:2:0.
	 */
	if ((channel == chan->out_chan ||
	     channel == chan->rotation_out_chan) &&
	    image->fmt->planar && image->fmt->uv_height_dec == 2)
		ipu_cpmem_skip_odd_chroma_rows(channel);

	if (channel == chan->rotation_in_chan ||
	    channel == chan->rotation_out_chan) {
		burst_size = 8;
		ipu_cpmem_set_block_mode(channel);
	} else
		burst_size = (width % 16) ? 8 : 16;

	ipu_cpmem_set_burstsize(channel, burst_size);

	ipu_ic_task_idma_init(chan->ic, channel, width, height,
			      burst_size, rot_mode);

	/*
	 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
	 * only do this when there is no PRG present.
	 */
	if (!channel->ipu->prg_priv)
		ipu_cpmem_set_axi_id(channel, 1);

	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}

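/*
 * Set up the IC resizer/CSC and the IDMAC channels for the given tile,
 * then enable the channels and the IC task to kick off the conversion.
 */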
static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	unsigned int dst_tile = ctx->out_tile_map[tile];
	unsigned int dest_width, dest_height;
	unsigned int col, row;
	u32 rsc;
	int ret;

	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
		__func__, chan->ic_task, ctx, run, tile, dst_tile);

	/* clear EOF irq mask */
	ctx->eof_mask = 0;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* swap width/height for resizer */
		dest_width = d_image->tile[dst_tile].height;
		dest_height = d_image->tile[dst_tile].width;
	} else {
		dest_width = d_image->tile[dst_tile].width;
		dest_height = d_image->tile[dst_tile].height;
	}

	row = tile / s_image->num_cols;
	col = tile % s_image->num_cols;

	rsc = (ctx->downsize_coeff_v << 30) |
	      (ctx->resize_coeffs_v[row] << 16) |
	      (ctx->downsize_coeff_h << 14) |
	      (ctx->resize_coeffs_h[col]);

	dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
		__func__, s_image->tile[tile].width,
		s_image->tile[tile].height, dest_width, dest_height, rsc);

	/* setup the IC resizer and CSC */
	ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
				   s_image->tile[tile].width,
				   s_image->tile[tile].height,
				   dest_width,
				   dest_height,
				   rsc);
	if (ret) {
		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
		return ret;
	}

	/* init the source MEM-->IC PP IDMAC channel */
	init_idmac_channel(ctx, chan->in_chan, s_image,
			   IPU_ROTATE_NONE, false, tile);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* init the IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   IPU_ROTATE_NONE, true, tile);

		/* init the MEM-->IC PP ROT IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
				   ctx->rot_mode, true, tile);

		/* init the destination IC PP ROT-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
				   IPU_ROTATE_NONE, false, tile);

		/* now link IC PP-->MEM to MEM-->IC PP ROT */
		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
	} else {
		/* init the destination IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   ctx->rot_mode, false, tile);
	}

	/* enable the IC */
	ipu_ic_enable(chan->ic);

	/* set buffers ready */
	ipu_idmac_select_buffer(chan->in_chan, 0);
	ipu_idmac_select_buffer(chan->out_chan, 0);
	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
	if (ctx->double_buffering) {
		ipu_idmac_select_buffer(chan->in_chan, 1);
		ipu_idmac_select_buffer(chan->out_chan, 1);
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
	}

	/* enable the channels! */
	ipu_idmac_enable_channel(chan->in_chan);
	ipu_idmac_enable_channel(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_enable_channel(chan->rotation_in_chan);
		ipu_idmac_enable_channel(chan->rotation_out_chan);
	}

	ipu_ic_task_enable(chan->ic);

	ipu_cpmem_dump(chan->in_chan);
	ipu_cpmem_dump(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_cpmem_dump(chan->rotation_in_chan);
		ipu_cpmem_dump(chan->rotation_out_chan);
	}

	ipu_dump(priv->ipu);

	return 0;
}

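/*
 * Take a run off the pending queue, make it the channel's current run
 * and start conversion of its first tile.
 */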
/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	return convert_start(run, 0);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}

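/*
 * Drain the done queue, calling each run's completion callback. The
 * irqlock is dropped around the callback since the callback may itself
 * need to take it (e.g. to queue another run).
 */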
static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}

/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete_all(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}

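/*
 * Check whether the next tile needs different IC settings (tile
 * dimensions or per-tile resize coefficients) than the current one,
 * in which case the IC task must be stopped and reprogrammed.
 */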
static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
{
	unsigned int cur_tile = ctx->next_tile - 1;
	unsigned int next_tile = ctx->next_tile;

	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
		return true;

	return false;
}

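/*
 * Advance a run after its current tile has completed: either finish
 * the run, or set up the buffers for the next tile (restarting the IC
 * task if the next tile needs different settings).
 */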
/* hold irqlock when calling */
static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {
		if (ic_settings_changed(ctx)) {
			convert_stop(run);
			convert_start(run, ctx->next_tile);
		} else {
			src_tile = &s_image->tile[ctx->next_tile];
			dst_idx = ctx->out_tile_map[ctx->next_tile];
			dst_tile = &d_image->tile[dst_idx];

			ipu_cpmem_set_buffer(chan->in_chan, 0,
					     s_image->base.phys0 +
					     src_tile->offset);
			ipu_cpmem_set_buffer(outch, 0,
					     d_image->base.phys0 +
					     dst_tile->offset);
			if (s_image->fmt->planar)
				ipu_cpmem_set_uv_offset(chan->in_chan,
							src_tile->u_off,
							src_tile->v_off);
			if (d_image->fmt->planar)
				ipu_cpmem_set_uv_offset(outch,
							dst_tile->u_off,
							dst_tile->v_off);

			ipu_idmac_select_buffer(chan->in_chan, 0);
			ipu_idmac_select_buffer(outch, 0);
		}
	} else if (ctx->next_tile < ctx->num_tiles - 1) {

		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->eof_mask = 0;	/* clear EOF irq mask for next tile */
	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	return IRQ_WAKE_THREAD;
}

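/*
 * EOF interrupt handler. EOFs are accumulated in ctx->eof_mask; a tile
 * is considered complete only once EOFs have been seen on all channels
 * involved in the current (rotation or non-rotation) pipeline.
 */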
static irqreturn_t eof_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	irqreturn_t ret = IRQ_HANDLED;
	bool tile_complete = false;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (irq == chan->in_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_IN;
	} else if (irq == chan->out_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_OUT;
	} else if (irq == chan->rot_in_eof_irq ||
		   irq == chan->rot_out_eof_irq) {
		if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
			/* this was NOT a rotation op, shouldn't happen */
			dev_err(priv->ipu->dev,
				"Unexpected rotation interrupt\n");
			goto out;
		}
		ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
			EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
	} else {
		dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
		ret = IRQ_NONE;
		goto out;
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
	else
		tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);

	if (tile_complete)
		ret = do_tile_complete(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->in_eof_irq >= 0)
		free_irq(chan->in_eof_irq, chan);
	if (chan->rot_in_eof_irq >= 0)
		free_irq(chan->rot_in_eof_irq, chan);
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->in_eof_irq = -1;
	chan->rot_in_eof_irq = -1;
	chan->out_eof_irq = -1;
	chan->rot_out_eof_irq = -1;
}

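/*
 * Request the EOF interrupt for a channel. Returns the acquired irq
 * number on success (note: not zero), or a negative error code.
 */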
static int get_eof_irq(struct ipu_image_convert_chan *chan,
		       struct ipuv3_channel *channel)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret, irq;

	irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);

	ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
		return ret;
	}

	return irq;
}

static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	ret = get_eof_irq(chan, chan->in_chan);
	if (ret < 0) {
		chan->in_eof_irq = -1;
		goto err;
	}
	chan->in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_in_chan);
	if (ret < 0) {
		chan->rot_in_eof_irq = -1;
		goto err;
	}
	chan->rot_in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->out_chan);
	if (ret < 0) {
		chan->out_eof_irq = -1;
		goto err;
	}
	chan->out_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_out_chan);
	if (ret < 0) {
		chan->rot_out_eof_irq = -1;
		goto err;
	}
	chan->rot_out_eof_irq = ret;

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}

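/*
 * Fill in an ipu_image_convert_image from the client-supplied
 * ipu_image. Note that for planar formats the stride is kept in
 * pixels, while for packed formats it is the bytesperline.
 */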
static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride = ic_image->base.pix.bytesperline;

	return 0;
}

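/*
 * Clamp x to [min, max] and round it to the nearest multiple of
 * (1 << align). For example, clamp_align(125, 8, 4096, 3) rounds 125
 * to the nearest multiple of 8 within [8, 4096], giving 128.
 */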
/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}

/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
			      enum ipu_rotate_mode rot_mode)
{
	const struct ipu_image_pixfmt *infmt, *outfmt;
	u32 w_align_out, h_align_out;
	u32 w_align_in, h_align_in;

	infmt = get_format(in->pix.pixelformat);
	outfmt = get_format(out->pix.pixelformat);

	/* set some default pixel formats if needed */
	if (!infmt) {
		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		infmt = get_format(V4L2_PIX_FMT_RGB24);
	}
	if (!outfmt) {
		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		outfmt = get_format(V4L2_PIX_FMT_RGB24);
	}

	/* image converter does not handle fields */
	in->pix.field = out->pix.field = V4L2_FIELD_NONE;

	/* resizer cannot downsize more than 4:1 */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.width / 4);
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.height / 4);
	} else {
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.width / 4);
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.height / 4);
	}

	/* align input width/height */
	w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
					    rot_mode));
	h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
					     rot_mode));
	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
				    w_align_in);
	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
				     h_align_in);

	/* align output width/height */
	w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
					     rot_mode));
	h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
					      rot_mode));
	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
				     w_align_out);
	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
				      h_align_out);

	/* set input/output strides and image sizes */
	in->pix.bytesperline = infmt->planar ?
		clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
			    w_align_in) :
		clamp_align((in->pix.width * infmt->bpp) >> 3,
			    ((2 << w_align_in) * infmt->bpp) >> 3,
			    (MAX_W * infmt->bpp) >> 3,
			    w_align_in);
	in->pix.sizeimage = infmt->planar ?
		(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
		in->pix.height * in->pix.bytesperline;
	out->pix.bytesperline = outfmt->planar ? out->pix.width :
		(out->pix.width * outfmt->bpp) >> 3;
	out->pix.sizeimage = outfmt->planar ?
		(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
		out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);

/*
 * this is used by ipu_image_convert_prepare() to verify set input and
 * output images are valid before starting the conversion. Clients can
 * also call it before calling ipu_image_convert_prepare().
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
			     enum ipu_rotate_mode rot_mode)
{
	struct ipu_image testin, testout;

	testin = *in;
	testout = *out;

	ipu_image_convert_adjust(&testin, &testout, rot_mode);

	if (testin.pix.width != in->pix.width ||
	    testin.pix.height != in->pix.height ||
	    testout.pix.width != out->pix.width ||
	    testout.pix.height != out->pix.height)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);

/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	unsigned int i;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	ctx->rot_mode = rot_mode;

	/* Sets ctx->in.num_rows/cols as well */
	ret = calc_image_resize_coefficients(ctx, in, out);
	if (ret)
		goto out_free;

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		d_image->num_rows = s_image->num_cols;
		d_image->num_cols = s_image->num_rows;
	} else {
		d_image->num_rows = s_image->num_rows;
		d_image->num_cols = s_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);

	find_seams(ctx, s_image, d_image);

	ret = calc_tile_dimensions(ctx, s_image);
	if (ret)
		goto out_free;

	ret = calc_tile_offsets(ctx, s_image);
	if (ret)
		goto out_free;

	calc_tile_dimensions(ctx, d_image);
	ret = calc_tile_offsets(ctx, d_image);
	if (ret)
		goto out_free;

	calc_tile_resize_coefficients(ctx);

	ret = ipu_ic_calc_csc(&ctx->csc,
			s_image->base.pix.ycbcr_enc,
			s_image->base.pix.quantization,
			ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
			d_image->base.pix.ycbcr_enc,
			d_image->base.pix.quantization,
			ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
	if (ret)
		goto out_free;

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer which is not possible. So double-buffering is
	 * impossible when either the source or destination images are
	 * a planar format (YUV420, YUV422P, etc.). Further, differently
	 * sized tiles or different resizing coefficients per tile
	 * prevent double-buffering as well.
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);
	for (i = 1; i < ctx->num_tiles; i++) {
		if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
		    ctx->in.tile[i].height != ctx->in.tile[0].height ||
		    ctx->out.tile[i].width != ctx->out.tile[0].width ||
		    ctx->out.tile[i].height != ctx->out.tile[0].height) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_cols; i++) {
		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_rows; i++) {
		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
			ctx->double_buffering = false;
			break;
		}
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		unsigned long intermediate_size = d_image->tile[0].size;

		for (i = 1; i < ctx->num_tiles; i++) {
			if (d_image->tile[i].size > intermediate_size)
				intermediate_size = d_image->tile[i].size;
		}

		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    intermediate_size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    intermediate_size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);

/*
 * Carry out a single image conversion run. Only the physaddr's of the input
 * and output image buffers are needed. The conversion context must have
 * been created previously with ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);

/* Abort any active or pending conversions for this context */
static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *active_run, *tmp;
	unsigned long flags;
	int run_count, ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* move all remaining pending runs in this context to done_q */
	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		if (run->ctx != ctx)
			continue;
		run->status = -EIO;
		list_move_tail(&run->list, &chan->done_q);
	}

	run_count = get_run_count(ctx, &chan->done_q);
	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
		chan->current_run : NULL;

	if (active_run)
		reinit_completion(&ctx->aborted);

	ctx->aborting = true;

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (!run_count && !active_run) {
		dev_dbg(priv->ipu->dev,
			"%s: task %u: no abort needed for ctx %p\n",
			__func__, chan->ic_task, ctx);
		return;
	}

	if (!active_run) {
		empty_done_q(chan);
		return;
	}

	dev_dbg(priv->ipu->dev,
		"%s: task %u: wait for completion: %d runs\n",
		__func__, chan->ic_task, run_count);

	ret = wait_for_completion_timeout(&ctx->aborted,
					  msecs_to_jiffies(10000));
	if (ret == 0) {
		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
		force_abort(ctx);
	}
}

void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	__ipu_image_convert_abort(ctx);
	ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);

/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned long flags;
	bool put_res;

	/* make sure no runs are hanging around */
	__ipu_image_convert_abort(ctx);

	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
		chan->ic_task, ctx);

	spin_lock_irqsave(&chan->irqlock, flags);

	list_del(&ctx->list);

	put_res = list_empty(&chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (put_res)
		release_ipu_resources(chan);

	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	free_dma_buf(priv, &ctx->rot_intermediate[0]);

	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);

/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
		  struct ipu_image *in, struct ipu_image *out,
		  enum ipu_rotate_mode rot_mode,
		  ipu_image_convert_cb_t complete,
		  void *complete_context)
{
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	int ret;

	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
					complete, complete_context);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run) {
		ipu_image_convert_unprepare(ctx);
		return ERR_PTR(-ENOMEM);
	}

	run->ctx = ctx;
	run->in_phys = in->phys0;
	run->out_phys = out->phys0;

	ret = ipu_image_convert_queue(run);
	if (ret) {
		ipu_image_convert_unprepare(ctx);
		kfree(run);
		return ERR_PTR(ret);
	}

	return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);

/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
					void *data)
{
	struct completion *comp = data;

	complete(comp);
}

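/*
 * Example usage (sketch only; assumes the caller already holds an
 * ipu_soc handle and two ipu_image structs whose phys0 point at
 * DMA-able buffers):
 *
 *	ipu_image_convert_adjust(&in, &out, IPU_ROTATE_NONE);
 *	ret = ipu_image_convert_sync(ipu, IC_TASK_POST_PROCESSOR,
 *				     &in, &out, IPU_ROTATE_NONE);
 */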
int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			   struct ipu_image *in, struct ipu_image *out,
			   enum ipu_rotate_mode rot_mode)
{
	struct ipu_image_convert_run *run;
	struct completion comp;
	int ret;

	init_completion(&comp);

	run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
				image_convert_sync_complete, &comp);
	if (IS_ERR(run))
		return PTR_ERR(run);

	ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
	ret = (ret == 0) ? -ETIMEDOUT : 0;

	ipu_image_convert_unprepare(run->ctx);
	kfree(run);

	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);

int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
	struct ipu_image_convert_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ipu->image_convert_priv = priv;
	priv->ipu = ipu;

	for (i = 0; i < IC_NUM_TASKS; i++) {
		struct ipu_image_convert_chan *chan = &priv->chan[i];

		chan->ic_task = i;
		chan->priv = priv;
		chan->dma_ch = &image_convert_dma_chan[i];
		chan->in_eof_irq = -1;
		chan->rot_in_eof_irq = -1;
		chan->out_eof_irq = -1;
		chan->rot_out_eof_irq = -1;

		spin_lock_init(&chan->irqlock);
		INIT_LIST_HEAD(&chan->ctx_list);
		INIT_LIST_HEAD(&chan->pending_q);
		INIT_LIST_HEAD(&chan->done_q);
	}

	return 0;
}

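/*
 * Nothing to release here: per-task IC and IDMAC resources are dropped
 * in release_ipu_resources() when the last context on a channel is
 * unprepared, and priv itself is devm-allocated.
 */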
void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}