pcm_native.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Digital Audio (PCM) abstract layer
  4. * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
  5. */
  6. #include <linux/compat.h>
  7. #include <linux/mm.h>
  8. #include <linux/module.h>
  9. #include <linux/file.h>
  10. #include <linux/slab.h>
  11. #include <linux/sched/signal.h>
  12. #include <linux/time.h>
  13. #include <linux/pm_qos.h>
  14. #include <linux/io.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/vmalloc.h>
  17. #include <sound/core.h>
  18. #include <sound/control.h>
  19. #include <sound/info.h>
  20. #include <sound/pcm.h>
  21. #include <sound/pcm_params.h>
  22. #include <sound/timer.h>
  23. #include <sound/minors.h>
  24. #include <linux/uio.h>
  25. #include <linux/delay.h>
  26. #include "pcm_local.h"
  27. #ifdef CONFIG_SND_DEBUG
  28. #define CREATE_TRACE_POINTS
  29. #include "pcm_param_trace.h"
  30. #else
  31. #define trace_hw_mask_param_enabled() 0
  32. #define trace_hw_interval_param_enabled() 0
  33. #define trace_hw_mask_param(substream, type, index, prev, curr)
  34. #define trace_hw_interval_param(substream, type, index, prev, curr)
  35. #endif
  36. /*
  37. * Compatibility
  38. */
  39. struct snd_pcm_hw_params_old {
  40. unsigned int flags;
  41. unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
  42. SNDRV_PCM_HW_PARAM_ACCESS + 1];
  43. struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
  44. SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
  45. unsigned int rmask;
  46. unsigned int cmask;
  47. unsigned int info;
  48. unsigned int msbits;
  49. unsigned int rate_num;
  50. unsigned int rate_den;
  51. snd_pcm_uframes_t fifo_size;
  52. unsigned char reserved[64];
  53. };
  54. #ifdef CONFIG_SND_SUPPORT_OLD_API
  55. #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
  56. #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
  57. static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
  58. struct snd_pcm_hw_params_old __user * _oparams);
  59. static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
  60. struct snd_pcm_hw_params_old __user * _oparams);
  61. #endif
  62. static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
  63. /*
  64. *
  65. */
  66. static DECLARE_RWSEM(snd_pcm_link_rwsem);
  67. void snd_pcm_group_init(struct snd_pcm_group *group)
  68. {
  69. spin_lock_init(&group->lock);
  70. mutex_init(&group->mutex);
  71. INIT_LIST_HEAD(&group->substreams);
  72. refcount_set(&group->refs, 1);
  73. }
  74. /* define group lock helpers */
  75. #define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
  76. static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
  77. { \
  78. if (nonatomic) \
  79. mutex_ ## mutex_action(&group->mutex); \
  80. else \
  81. spin_ ## action(&group->lock); \
  82. }
  83. DEFINE_PCM_GROUP_LOCK(lock, lock);
  84. DEFINE_PCM_GROUP_LOCK(unlock, unlock);
  85. DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
  86. DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
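/*
 * For reference, DEFINE_PCM_GROUP_LOCK(lock_irq, lock) above expands to
 * roughly the following helper (a sketch of the token pasting, not extra
 * code in the build):
 *
 *	static void snd_pcm_group_lock_irq(struct snd_pcm_group *group,
 *					   bool nonatomic)
 *	{
 *		if (nonatomic)
 *			mutex_lock(&group->mutex);
 *		else
 *			spin_lock_irq(&group->lock);
 *	}
 */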
  87. /**
  88. * snd_pcm_stream_lock - Lock the PCM stream
  89. * @substream: PCM substream
  90. *
  91. * This locks the PCM stream's spinlock or mutex depending on the nonatomic
   92. * flag of the given substream. It also takes the global link rw lock
   93. * (or rw sem) to avoid races with linked streams.
  94. */
  95. void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
  96. {
  97. snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
  98. }
  99. EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
  100. /**
  101. * snd_pcm_stream_unlock - Unlock the PCM stream
  102. * @substream: PCM substream
  103. *
  104. * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
  105. */
  106. void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
  107. {
  108. snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
  109. }
  110. EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
  111. /**
  112. * snd_pcm_stream_lock_irq - Lock the PCM stream
  113. * @substream: PCM substream
  114. *
  115. * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
   116. * IRQ (only when nonatomic is false). In the nonatomic case, this is
   117. * identical to snd_pcm_stream_lock().
  118. */
  119. void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
  120. {
  121. snd_pcm_group_lock_irq(&substream->self_group,
  122. substream->pcm->nonatomic);
  123. }
  124. EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
  125. static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
  126. {
  127. struct snd_pcm_group *group = &substream->self_group;
  128. if (substream->pcm->nonatomic)
  129. mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
  130. else
  131. spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
  132. }
  133. /**
  134. * snd_pcm_stream_unlock_irq - Unlock the PCM stream
  135. * @substream: PCM substream
  136. *
  137. * This is a counter-part of snd_pcm_stream_lock_irq().
  138. */
  139. void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
  140. {
  141. snd_pcm_group_unlock_irq(&substream->self_group,
  142. substream->pcm->nonatomic);
  143. }
  144. EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
  145. unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
  146. {
  147. unsigned long flags = 0;
  148. if (substream->pcm->nonatomic)
  149. mutex_lock(&substream->self_group.mutex);
  150. else
  151. spin_lock_irqsave(&substream->self_group.lock, flags);
  152. return flags;
  153. }
  154. EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
  155. /**
  156. * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
  157. * @substream: PCM substream
  158. * @flags: irq flags
  159. *
  160. * This is a counter-part of snd_pcm_stream_lock_irqsave().
  161. */
  162. void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
  163. unsigned long flags)
  164. {
  165. if (substream->pcm->nonatomic)
  166. mutex_unlock(&substream->self_group.mutex);
  167. else
  168. spin_unlock_irqrestore(&substream->self_group.lock, flags);
  169. }
  170. EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
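/*
 * Typical caller-side pattern for the irqsave variant (a sketch; the
 * snd_pcm_stream_lock_irqsave() wrapper macro around
 * _snd_pcm_stream_lock_irqsave() is assumed to be provided by <sound/pcm.h>):
 *
 *	unsigned long flags;
 *
 *	snd_pcm_stream_lock_irqsave(substream, flags);
 *	... touch runtime/status fields that the stream lock protects ...
 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
 */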
  171. /* Run PCM ioctl ops */
  172. static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
  173. unsigned cmd, void *arg)
  174. {
  175. if (substream->ops->ioctl)
  176. return substream->ops->ioctl(substream, cmd, arg);
  177. else
  178. return snd_pcm_lib_ioctl(substream, cmd, arg);
  179. }
  180. int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
  181. {
  182. struct snd_pcm *pcm = substream->pcm;
  183. struct snd_pcm_str *pstr = substream->pstr;
  184. memset(info, 0, sizeof(*info));
  185. info->card = pcm->card->number;
  186. info->device = pcm->device;
  187. info->stream = substream->stream;
  188. info->subdevice = substream->number;
  189. strlcpy(info->id, pcm->id, sizeof(info->id));
  190. strlcpy(info->name, pcm->name, sizeof(info->name));
  191. info->dev_class = pcm->dev_class;
  192. info->dev_subclass = pcm->dev_subclass;
  193. info->subdevices_count = pstr->substream_count;
  194. info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
  195. strlcpy(info->subname, substream->name, sizeof(info->subname));
  196. return 0;
  197. }
  198. int snd_pcm_info_user(struct snd_pcm_substream *substream,
  199. struct snd_pcm_info __user * _info)
  200. {
  201. struct snd_pcm_info *info;
  202. int err;
  203. info = kmalloc(sizeof(*info), GFP_KERNEL);
  204. if (! info)
  205. return -ENOMEM;
  206. err = snd_pcm_info(substream, info);
  207. if (err >= 0) {
  208. if (copy_to_user(_info, info, sizeof(*info)))
  209. err = -EFAULT;
  210. }
  211. kfree(info);
  212. return err;
  213. }
  214. /* macro for simplified cast */
  215. #define PARAM_MASK_BIT(b) (1U << (__force int)(b))
  216. static bool hw_support_mmap(struct snd_pcm_substream *substream)
  217. {
  218. if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
  219. return false;
  220. if (substream->ops->mmap || substream->ops->page)
  221. return true;
  222. switch (substream->dma_buffer.dev.type) {
  223. case SNDRV_DMA_TYPE_UNKNOWN:
  224. /* we can't know the device, so just assume that the driver does
  225. * everything right
  226. */
  227. return true;
  228. case SNDRV_DMA_TYPE_CONTINUOUS:
  229. case SNDRV_DMA_TYPE_VMALLOC:
  230. return true;
  231. default:
  232. return dma_can_mmap(substream->dma_buffer.dev.dev);
  233. }
  234. }
  235. static int constrain_mask_params(struct snd_pcm_substream *substream,
  236. struct snd_pcm_hw_params *params)
  237. {
  238. struct snd_pcm_hw_constraints *constrs =
  239. &substream->runtime->hw_constraints;
  240. struct snd_mask *m;
  241. unsigned int k;
  242. struct snd_mask old_mask;
  243. int changed;
  244. for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
  245. m = hw_param_mask(params, k);
  246. if (snd_mask_empty(m))
  247. return -EINVAL;
   248. /* Skip this parameter unless the caller requested to change it. */
  249. if (!(params->rmask & PARAM_MASK_BIT(k)))
  250. continue;
  251. if (trace_hw_mask_param_enabled())
  252. old_mask = *m;
  253. changed = snd_mask_refine(m, constrs_mask(constrs, k));
  254. if (changed < 0)
  255. return changed;
  256. if (changed == 0)
  257. continue;
  258. /* Set corresponding flag so that the caller gets it. */
  259. trace_hw_mask_param(substream, k, 0, &old_mask, m);
  260. params->cmask |= PARAM_MASK_BIT(k);
  261. }
  262. return 0;
  263. }
  264. static int constrain_interval_params(struct snd_pcm_substream *substream,
  265. struct snd_pcm_hw_params *params)
  266. {
  267. struct snd_pcm_hw_constraints *constrs =
  268. &substream->runtime->hw_constraints;
  269. struct snd_interval *i;
  270. unsigned int k;
  271. struct snd_interval old_interval;
  272. int changed;
  273. for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
  274. i = hw_param_interval(params, k);
  275. if (snd_interval_empty(i))
  276. return -EINVAL;
   278. /* Skip this parameter unless the caller requested to change it. */
  278. if (!(params->rmask & PARAM_MASK_BIT(k)))
  279. continue;
  280. if (trace_hw_interval_param_enabled())
  281. old_interval = *i;
  282. changed = snd_interval_refine(i, constrs_interval(constrs, k));
  283. if (changed < 0)
  284. return changed;
  285. if (changed == 0)
  286. continue;
  287. /* Set corresponding flag so that the caller gets it. */
  288. trace_hw_interval_param(substream, k, 0, &old_interval, i);
  289. params->cmask |= PARAM_MASK_BIT(k);
  290. }
  291. return 0;
  292. }
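/*
 * The rmask/cmask contract used by the constraint helpers above and by
 * snd_pcm_hw_refine() below, as a minimal in-kernel sketch (illustrative
 * only):
 *
 *	params->rmask = PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE);
 *	params->cmask = 0;
 *	err = snd_pcm_hw_refine(substream, params);
 *	if (!err && (params->cmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)))
 *		... the rate interval was narrowed by the constraints ...
 */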
  293. static int constrain_params_by_rules(struct snd_pcm_substream *substream,
  294. struct snd_pcm_hw_params *params)
  295. {
  296. struct snd_pcm_hw_constraints *constrs =
  297. &substream->runtime->hw_constraints;
  298. unsigned int k;
  299. unsigned int *rstamps;
  300. unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
  301. unsigned int stamp;
  302. struct snd_pcm_hw_rule *r;
  303. unsigned int d;
  304. struct snd_mask old_mask;
  305. struct snd_interval old_interval;
  306. bool again;
  307. int changed, err = 0;
  308. /*
   309. * Each application of a rule has its own sequence number.
   310. *
   311. * Each member of the 'rstamps' array holds the sequence number of
   312. * the most recent application of the corresponding rule.
  313. */
  314. rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
  315. if (!rstamps)
  316. return -ENOMEM;
  317. /*
   318. * Each member of the 'vstamps' array holds the sequence number of
   319. * the most recent rule application in which the corresponding
   320. * parameter was changed.
   321. *
   322. * In the initial state, the elements for parameters requested by the
   323. * caller are set to 1; the elements for unrequested parameters are 0,
   324. * so those parameters are never changed.
  325. */
  326. for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
  327. vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;
  328. /* Due to the above design, actual sequence number starts at 2. */
  329. stamp = 2;
  330. retry:
  331. /* Apply all rules in order. */
  332. again = false;
  333. for (k = 0; k < constrs->rules_num; k++) {
  334. r = &constrs->rules[k];
  335. /*
   336. * Check the condition bits of this rule. When the rule has
   337. * condition bits, it is processed only if at least one of
   338. * them is also set in params->flags.
   339. * SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP is one such bit.
  340. */
  341. if (r->cond && !(r->cond & params->flags))
  342. continue;
  343. /*
   344. * The 'deps' array holds at most three dependencies on
   345. * SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
   346. * member of this array is a sentinel and should be a
   347. * negative value.
   348. *
   349. * The rule is processed this time only when a dependent
   350. * parameter was changed by an earlier application of
   351. * another rule.
  352. */
  353. for (d = 0; r->deps[d] >= 0; d++) {
  354. if (vstamps[r->deps[d]] > rstamps[k])
  355. break;
  356. }
  357. if (r->deps[d] < 0)
  358. continue;
  359. if (trace_hw_mask_param_enabled()) {
  360. if (hw_is_mask(r->var))
  361. old_mask = *hw_param_mask(params, r->var);
  362. }
  363. if (trace_hw_interval_param_enabled()) {
  364. if (hw_is_interval(r->var))
  365. old_interval = *hw_param_interval(params, r->var);
  366. }
  367. changed = r->func(params, r);
  368. if (changed < 0) {
  369. err = changed;
  370. goto out;
  371. }
  372. /*
   373. * When a parameter was changed, notify the caller via the
   374. * corresponding bit in cmask, then prepare for the next
   375. * iteration.
  376. */
  377. if (changed && r->var >= 0) {
  378. if (hw_is_mask(r->var)) {
  379. trace_hw_mask_param(substream, r->var,
  380. k + 1, &old_mask,
  381. hw_param_mask(params, r->var));
  382. }
  383. if (hw_is_interval(r->var)) {
  384. trace_hw_interval_param(substream, r->var,
  385. k + 1, &old_interval,
  386. hw_param_interval(params, r->var));
  387. }
  388. params->cmask |= PARAM_MASK_BIT(r->var);
  389. vstamps[r->var] = stamp;
  390. again = true;
  391. }
  392. rstamps[k] = stamp++;
  393. }
  394. /* Iterate to evaluate all rules till no parameters are changed. */
  395. if (again)
  396. goto retry;
  397. out:
  398. kfree(rstamps);
  399. return err;
  400. }
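/*
 * The rules walked above are registered by drivers at open time with
 * snd_pcm_hw_rule_add() (defined in pcm_lib.c). A minimal sketch of such
 * a registration, using a hypothetical my_rate_rule() callback:
 *
 *	static int my_rate_rule(struct snd_pcm_hw_params *params,
 *				struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *rate =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 8000, .max = 48000, .integer = 1 };
 *
 *		return snd_interval_refine(rate, &t);
 *	}
 *
 *	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *			    my_rate_rule, NULL,
 *			    SNDRV_PCM_HW_PARAM_RATE, -1);
 */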
  401. static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
  402. struct snd_pcm_hw_params *params)
  403. {
  404. const struct snd_interval *i;
  405. const struct snd_mask *m;
  406. int err;
  407. if (!params->msbits) {
  408. i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
  409. if (snd_interval_single(i))
  410. params->msbits = snd_interval_value(i);
  411. }
  412. if (!params->rate_den) {
  413. i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
  414. if (snd_interval_single(i)) {
  415. params->rate_num = snd_interval_value(i);
  416. params->rate_den = 1;
  417. }
  418. }
  419. if (!params->fifo_size) {
  420. m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
  421. i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
  422. if (snd_mask_single(m) && snd_interval_single(i)) {
  423. err = snd_pcm_ops_ioctl(substream,
  424. SNDRV_PCM_IOCTL1_FIFO_SIZE,
  425. params);
  426. if (err < 0)
  427. return err;
  428. }
  429. }
  430. if (!params->info) {
  431. params->info = substream->runtime->hw.info;
  432. params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
  433. SNDRV_PCM_INFO_DRAIN_TRIGGER);
  434. if (!hw_support_mmap(substream))
  435. params->info &= ~(SNDRV_PCM_INFO_MMAP |
  436. SNDRV_PCM_INFO_MMAP_VALID);
  437. }
  438. return 0;
  439. }
  440. int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
  441. struct snd_pcm_hw_params *params)
  442. {
  443. int err;
  444. params->info = 0;
  445. params->fifo_size = 0;
  446. if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
  447. params->msbits = 0;
  448. if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)) {
  449. params->rate_num = 0;
  450. params->rate_den = 0;
  451. }
  452. err = constrain_mask_params(substream, params);
  453. if (err < 0)
  454. return err;
  455. err = constrain_interval_params(substream, params);
  456. if (err < 0)
  457. return err;
  458. err = constrain_params_by_rules(substream, params);
  459. if (err < 0)
  460. return err;
  461. params->rmask = 0;
  462. return 0;
  463. }
  464. EXPORT_SYMBOL(snd_pcm_hw_refine);
  465. static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
  466. struct snd_pcm_hw_params __user * _params)
  467. {
  468. struct snd_pcm_hw_params *params;
  469. int err;
  470. params = memdup_user(_params, sizeof(*params));
  471. if (IS_ERR(params))
  472. return PTR_ERR(params);
  473. err = snd_pcm_hw_refine(substream, params);
  474. if (err < 0)
  475. goto end;
  476. err = fixup_unreferenced_params(substream, params);
  477. if (err < 0)
  478. goto end;
  479. if (copy_to_user(_params, params, sizeof(*params)))
  480. err = -EFAULT;
  481. end:
  482. kfree(params);
  483. return err;
  484. }
  485. static int period_to_usecs(struct snd_pcm_runtime *runtime)
  486. {
  487. int usecs;
  488. if (! runtime->rate)
  489. return -1; /* invalid */
  490. /* take 75% of period time as the deadline */
  491. usecs = (750000 / runtime->rate) * runtime->period_size;
  492. usecs += ((750000 % runtime->rate) * runtime->period_size) /
  493. runtime->rate;
  494. return usecs;
  495. }
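/*
 * Worked example for period_to_usecs(): at rate = 48000 and
 * period_size = 1024 frames, the period is ~21333 us, and the helper
 * returns (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 * = 15360 + 640 = 16000 us, i.e. 75% of the period time, which is then
 * used as the CPU latency QoS request in snd_pcm_hw_params().
 */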
  496. static void snd_pcm_set_state(struct snd_pcm_substream *substream,
  497. snd_pcm_state_t state)
  498. {
  499. snd_pcm_stream_lock_irq(substream);
  500. if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
  501. substream->runtime->status->state = state;
  502. snd_pcm_stream_unlock_irq(substream);
  503. }
  504. static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
  505. int event)
  506. {
  507. #ifdef CONFIG_SND_PCM_TIMER
  508. if (substream->timer)
  509. snd_timer_notify(substream->timer, event,
  510. &substream->runtime->trigger_tstamp);
  511. #endif
  512. }
  513. void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq)
  514. {
  515. if (substream->runtime && substream->runtime->stop_operating) {
  516. substream->runtime->stop_operating = false;
  517. if (substream->ops && substream->ops->sync_stop)
  518. substream->ops->sync_stop(substream);
  519. else if (sync_irq && substream->pcm->card->sync_irq > 0)
  520. synchronize_irq(substream->pcm->card->sync_irq);
  521. }
  522. }
  523. /**
  524. * snd_pcm_hw_params_choose - choose a configuration defined by @params
  525. * @pcm: PCM instance
  526. * @params: the hw_params instance
  527. *
   528. * Choose one configuration from the configuration space defined by @params.
   529. * The chosen configuration is obtained by fixing, in this order:
  530. * first access, first format, first subformat, min channels,
  531. * min rate, min period time, max buffer size, min tick time
  532. *
  533. * Return: Zero if successful, or a negative error code on failure.
  534. */
  535. static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
  536. struct snd_pcm_hw_params *params)
  537. {
  538. static const int vars[] = {
  539. SNDRV_PCM_HW_PARAM_ACCESS,
  540. SNDRV_PCM_HW_PARAM_FORMAT,
  541. SNDRV_PCM_HW_PARAM_SUBFORMAT,
  542. SNDRV_PCM_HW_PARAM_CHANNELS,
  543. SNDRV_PCM_HW_PARAM_RATE,
  544. SNDRV_PCM_HW_PARAM_PERIOD_TIME,
  545. SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
  546. SNDRV_PCM_HW_PARAM_TICK_TIME,
  547. -1
  548. };
  549. const int *v;
  550. struct snd_mask old_mask;
  551. struct snd_interval old_interval;
  552. int changed;
  553. for (v = vars; *v != -1; v++) {
  554. /* Keep old parameter to trace. */
  555. if (trace_hw_mask_param_enabled()) {
  556. if (hw_is_mask(*v))
  557. old_mask = *hw_param_mask(params, *v);
  558. }
  559. if (trace_hw_interval_param_enabled()) {
  560. if (hw_is_interval(*v))
  561. old_interval = *hw_param_interval(params, *v);
  562. }
  563. if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
  564. changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
  565. else
  566. changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
  567. if (changed < 0)
  568. return changed;
  569. if (changed == 0)
  570. continue;
  571. /* Trace the changed parameter. */
  572. if (hw_is_mask(*v)) {
  573. trace_hw_mask_param(pcm, *v, 0, &old_mask,
  574. hw_param_mask(params, *v));
  575. }
  576. if (hw_is_interval(*v)) {
  577. trace_hw_interval_param(pcm, *v, 0, &old_interval,
  578. hw_param_interval(params, *v));
  579. }
  580. }
  581. return 0;
  582. }
   583. /* acquire buffer_mutex; if a read/write operation is in progress, return
   584. * -EBUSY, otherwise block further r/w operations
  585. */
  586. static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
  587. {
  588. if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
  589. return -EBUSY;
  590. mutex_lock(&runtime->buffer_mutex);
  591. return 0; /* keep buffer_mutex, unlocked by below */
  592. }
  593. /* release buffer_mutex and clear r/w access flag */
  594. static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
  595. {
  596. mutex_unlock(&runtime->buffer_mutex);
  597. atomic_inc(&runtime->buffer_accessing);
  598. }
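/*
 * Usage sketch for the pair above: configuration paths such as hw_params
 * and hw_free bracket their work with these helpers, so they either fail
 * with -EBUSY while a read/write copy is in flight or block further
 * copies until they are done:
 *
 *	err = snd_pcm_buffer_access_lock(runtime);
 *	if (err < 0)
 *		return err;
 *	... reconfigure or release the buffer ...
 *	snd_pcm_buffer_access_unlock(runtime);
 */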
  599. #if IS_ENABLED(CONFIG_SND_PCM_OSS)
  600. #define is_oss_stream(substream) ((substream)->oss.oss)
  601. #else
  602. #define is_oss_stream(substream) false
  603. #endif
  604. static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
  605. struct snd_pcm_hw_params *params)
  606. {
  607. struct snd_pcm_runtime *runtime;
  608. int err, usecs;
  609. unsigned int bits;
  610. snd_pcm_uframes_t frames;
  611. if (PCM_RUNTIME_CHECK(substream))
  612. return -ENXIO;
  613. runtime = substream->runtime;
  614. err = snd_pcm_buffer_access_lock(runtime);
  615. if (err < 0)
  616. return err;
  617. snd_pcm_stream_lock_irq(substream);
  618. switch (runtime->status->state) {
  619. case SNDRV_PCM_STATE_OPEN:
  620. case SNDRV_PCM_STATE_SETUP:
  621. case SNDRV_PCM_STATE_PREPARED:
  622. if (!is_oss_stream(substream) &&
  623. atomic_read(&substream->mmap_count))
  624. err = -EBADFD;
  625. break;
  626. default:
  627. err = -EBADFD;
  628. break;
  629. }
  630. snd_pcm_stream_unlock_irq(substream);
  631. if (err)
  632. goto unlock;
  633. snd_pcm_sync_stop(substream, true);
  634. params->rmask = ~0U;
  635. err = snd_pcm_hw_refine(substream, params);
  636. if (err < 0)
  637. goto _error;
  638. err = snd_pcm_hw_params_choose(substream, params);
  639. if (err < 0)
  640. goto _error;
  641. err = fixup_unreferenced_params(substream, params);
  642. if (err < 0)
  643. goto _error;
  644. if (substream->managed_buffer_alloc) {
  645. err = snd_pcm_lib_malloc_pages(substream,
  646. params_buffer_bytes(params));
  647. if (err < 0)
  648. goto _error;
  649. runtime->buffer_changed = err > 0;
  650. }
  651. if (substream->ops->hw_params != NULL) {
  652. err = substream->ops->hw_params(substream, params);
  653. if (err < 0)
  654. goto _error;
  655. }
  656. runtime->access = params_access(params);
  657. runtime->format = params_format(params);
  658. runtime->subformat = params_subformat(params);
  659. runtime->channels = params_channels(params);
  660. runtime->rate = params_rate(params);
  661. runtime->period_size = params_period_size(params);
  662. runtime->periods = params_periods(params);
  663. runtime->buffer_size = params_buffer_size(params);
  664. runtime->info = params->info;
  665. runtime->rate_num = params->rate_num;
  666. runtime->rate_den = params->rate_den;
  667. runtime->no_period_wakeup =
  668. (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
  669. (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
  670. bits = snd_pcm_format_physical_width(runtime->format);
  671. runtime->sample_bits = bits;
  672. bits *= runtime->channels;
  673. runtime->frame_bits = bits;
  674. frames = 1;
  675. while (bits % 8 != 0) {
  676. bits *= 2;
  677. frames *= 2;
  678. }
  679. runtime->byte_align = bits / 8;
  680. runtime->min_align = frames;
  681. /* Default sw params */
  682. runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
  683. runtime->period_step = 1;
  684. runtime->control->avail_min = runtime->period_size;
  685. runtime->start_threshold = 1;
  686. runtime->stop_threshold = runtime->buffer_size;
  687. runtime->silence_threshold = 0;
  688. runtime->silence_size = 0;
  689. runtime->boundary = runtime->buffer_size;
  690. while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
  691. runtime->boundary *= 2;
   692. /* clear the buffer to avoid possible kernel info leaks */
  693. if (runtime->dma_area && !substream->ops->copy_user) {
  694. size_t size = runtime->dma_bytes;
  695. if (runtime->info & SNDRV_PCM_INFO_MMAP)
  696. size = PAGE_ALIGN(size);
  697. memset(runtime->dma_area, 0, size);
  698. }
  699. snd_pcm_timer_resolution_change(substream);
  700. snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
  701. if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
  702. cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
  703. if ((usecs = period_to_usecs(runtime)) >= 0)
  704. cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
  705. usecs);
  706. err = 0;
  707. _error:
  708. if (err) {
   709. /* the hardware might be unusable from this point,
   710. * so force the application to retry with correct
   711. * hardware parameter settings
  712. */
  713. snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
  714. if (substream->ops->hw_free != NULL)
  715. substream->ops->hw_free(substream);
  716. if (substream->managed_buffer_alloc)
  717. snd_pcm_lib_free_pages(substream);
  718. }
  719. unlock:
  720. snd_pcm_buffer_access_unlock(runtime);
  721. return err;
  722. }
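/*
 * Worked example for the alignment math in snd_pcm_hw_params() above
 * (values assume snd_pcm_format_physical_width() returns 16 for S16_LE
 * and 4 for IMA ADPCM):
 *
 *	S16_LE, 2 channels: frame_bits = 32, already a multiple of 8,
 *	so byte_align = 4 and min_align = 1 frame.
 *
 *	IMA ADPCM, 1 channel: frame_bits = 4; the loop doubles it once
 *	(bits = 8, frames = 2), so byte_align = 1 and min_align = 2,
 *	i.e. frames must be handled in pairs to stay byte-aligned.
 *
 * The boundary loop then picks the largest buffer_size * 2^n that still
 * leaves room below LONG_MAX; it is used for hw_ptr/appl_ptr wrap-around.
 */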
  723. static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
  724. struct snd_pcm_hw_params __user * _params)
  725. {
  726. struct snd_pcm_hw_params *params;
  727. int err;
  728. params = memdup_user(_params, sizeof(*params));
  729. if (IS_ERR(params))
  730. return PTR_ERR(params);
  731. err = snd_pcm_hw_params(substream, params);
  732. if (err < 0)
  733. goto end;
  734. if (copy_to_user(_params, params, sizeof(*params)))
  735. err = -EFAULT;
  736. end:
  737. kfree(params);
  738. return err;
  739. }
  740. static int do_hw_free(struct snd_pcm_substream *substream)
  741. {
  742. int result = 0;
  743. snd_pcm_sync_stop(substream, true);
  744. if (substream->ops->hw_free)
  745. result = substream->ops->hw_free(substream);
  746. if (substream->managed_buffer_alloc)
  747. snd_pcm_lib_free_pages(substream);
  748. return result;
  749. }
  750. static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
  751. {
  752. struct snd_pcm_runtime *runtime;
  753. int result = 0;
  754. if (PCM_RUNTIME_CHECK(substream))
  755. return -ENXIO;
  756. runtime = substream->runtime;
  757. result = snd_pcm_buffer_access_lock(runtime);
  758. if (result < 0)
  759. return result;
  760. snd_pcm_stream_lock_irq(substream);
  761. switch (runtime->status->state) {
  762. case SNDRV_PCM_STATE_SETUP:
  763. case SNDRV_PCM_STATE_PREPARED:
  764. if (atomic_read(&substream->mmap_count))
  765. result = -EBADFD;
  766. break;
  767. default:
  768. result = -EBADFD;
  769. break;
  770. }
  771. snd_pcm_stream_unlock_irq(substream);
  772. if (result)
  773. goto unlock;
  774. result = do_hw_free(substream);
  775. snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
  776. cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
  777. unlock:
  778. snd_pcm_buffer_access_unlock(runtime);
  779. return result;
  780. }
  781. static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
  782. struct snd_pcm_sw_params *params)
  783. {
  784. struct snd_pcm_runtime *runtime;
  785. int err;
  786. if (PCM_RUNTIME_CHECK(substream))
  787. return -ENXIO;
  788. runtime = substream->runtime;
  789. snd_pcm_stream_lock_irq(substream);
  790. if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
  791. snd_pcm_stream_unlock_irq(substream);
  792. return -EBADFD;
  793. }
  794. snd_pcm_stream_unlock_irq(substream);
  795. if (params->tstamp_mode < 0 ||
  796. params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
  797. return -EINVAL;
  798. if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
  799. params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
  800. return -EINVAL;
  801. if (params->avail_min == 0)
  802. return -EINVAL;
  803. if (params->silence_size >= runtime->boundary) {
  804. if (params->silence_threshold != 0)
  805. return -EINVAL;
  806. } else {
  807. if (params->silence_size > params->silence_threshold)
  808. return -EINVAL;
  809. if (params->silence_threshold > runtime->buffer_size)
  810. return -EINVAL;
  811. }
  812. err = 0;
  813. snd_pcm_stream_lock_irq(substream);
  814. runtime->tstamp_mode = params->tstamp_mode;
  815. if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
  816. runtime->tstamp_type = params->tstamp_type;
  817. runtime->period_step = params->period_step;
  818. runtime->control->avail_min = params->avail_min;
  819. runtime->start_threshold = params->start_threshold;
  820. runtime->stop_threshold = params->stop_threshold;
  821. runtime->silence_threshold = params->silence_threshold;
  822. runtime->silence_size = params->silence_size;
  823. params->boundary = runtime->boundary;
  824. if (snd_pcm_running(substream)) {
  825. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
  826. runtime->silence_size > 0)
  827. snd_pcm_playback_silence(substream, ULONG_MAX);
  828. err = snd_pcm_update_state(substream, runtime);
  829. }
  830. snd_pcm_stream_unlock_irq(substream);
  831. return err;
  832. }
  833. static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
  834. struct snd_pcm_sw_params __user * _params)
  835. {
  836. struct snd_pcm_sw_params params;
  837. int err;
  838. if (copy_from_user(&params, _params, sizeof(params)))
  839. return -EFAULT;
  840. err = snd_pcm_sw_params(substream, &params);
  841. if (copy_to_user(_params, &params, sizeof(params)))
  842. return -EFAULT;
  843. return err;
  844. }
  845. static inline snd_pcm_uframes_t
  846. snd_pcm_calc_delay(struct snd_pcm_substream *substream)
  847. {
  848. snd_pcm_uframes_t delay;
  849. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
  850. delay = snd_pcm_playback_hw_avail(substream->runtime);
  851. else
  852. delay = snd_pcm_capture_avail(substream->runtime);
  853. return delay + substream->runtime->delay;
  854. }
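/*
 * Numeric sketch for snd_pcm_calc_delay(): for playback, hw_avail is the
 * number of frames queued but not yet played, so with 4096 frames queued
 * and a driver-reported runtime->delay of 32 frames (e.g. a codec FIFO),
 * a delay of 4128 frames is reported; capture uses the frames already
 * captured but not yet read instead.
 */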
  855. int snd_pcm_status64(struct snd_pcm_substream *substream,
  856. struct snd_pcm_status64 *status)
  857. {
  858. struct snd_pcm_runtime *runtime = substream->runtime;
  859. snd_pcm_stream_lock_irq(substream);
  860. snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
  861. &runtime->audio_tstamp_config);
  862. /* backwards compatible behavior */
  863. if (runtime->audio_tstamp_config.type_requested ==
  864. SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
  865. if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
  866. runtime->audio_tstamp_config.type_requested =
  867. SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
  868. else
  869. runtime->audio_tstamp_config.type_requested =
  870. SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
  871. runtime->audio_tstamp_report.valid = 0;
  872. } else
  873. runtime->audio_tstamp_report.valid = 1;
  874. status->state = runtime->status->state;
  875. status->suspended_state = runtime->status->suspended_state;
  876. if (status->state == SNDRV_PCM_STATE_OPEN)
  877. goto _end;
  878. status->trigger_tstamp_sec = runtime->trigger_tstamp.tv_sec;
  879. status->trigger_tstamp_nsec = runtime->trigger_tstamp.tv_nsec;
  880. if (snd_pcm_running(substream)) {
  881. snd_pcm_update_hw_ptr(substream);
  882. if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
  883. status->tstamp_sec = runtime->status->tstamp.tv_sec;
  884. status->tstamp_nsec =
  885. runtime->status->tstamp.tv_nsec;
  886. status->driver_tstamp_sec =
  887. runtime->driver_tstamp.tv_sec;
  888. status->driver_tstamp_nsec =
  889. runtime->driver_tstamp.tv_nsec;
  890. status->audio_tstamp_sec =
  891. runtime->status->audio_tstamp.tv_sec;
  892. status->audio_tstamp_nsec =
  893. runtime->status->audio_tstamp.tv_nsec;
  894. if (runtime->audio_tstamp_report.valid == 1)
  895. /* backwards compatibility, no report provided in COMPAT mode */
  896. snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
  897. &status->audio_tstamp_accuracy,
  898. &runtime->audio_tstamp_report);
  899. goto _tstamp_end;
  900. }
  901. } else {
  902. /* get tstamp only in fallback mode and only if enabled */
  903. if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
  904. struct timespec64 tstamp;
  905. snd_pcm_gettime(runtime, &tstamp);
  906. status->tstamp_sec = tstamp.tv_sec;
  907. status->tstamp_nsec = tstamp.tv_nsec;
  908. }
  909. }
  910. _tstamp_end:
  911. status->appl_ptr = runtime->control->appl_ptr;
  912. status->hw_ptr = runtime->status->hw_ptr;
  913. status->avail = snd_pcm_avail(substream);
  914. status->delay = snd_pcm_running(substream) ?
  915. snd_pcm_calc_delay(substream) : 0;
  916. status->avail_max = runtime->avail_max;
  917. status->overrange = runtime->overrange;
  918. runtime->avail_max = 0;
  919. runtime->overrange = 0;
  920. _end:
  921. snd_pcm_stream_unlock_irq(substream);
  922. return 0;
  923. }
  924. static int snd_pcm_status_user64(struct snd_pcm_substream *substream,
  925. struct snd_pcm_status64 __user * _status,
  926. bool ext)
  927. {
  928. struct snd_pcm_status64 status;
  929. int res;
  930. memset(&status, 0, sizeof(status));
  931. /*
  932. * with extension, parameters are read/write,
  933. * get audio_tstamp_data from user,
  934. * ignore rest of status structure
  935. */
  936. if (ext && get_user(status.audio_tstamp_data,
  937. (u32 __user *)(&_status->audio_tstamp_data)))
  938. return -EFAULT;
  939. res = snd_pcm_status64(substream, &status);
  940. if (res < 0)
  941. return res;
  942. if (copy_to_user(_status, &status, sizeof(status)))
  943. return -EFAULT;
  944. return 0;
  945. }
  946. static int snd_pcm_status_user32(struct snd_pcm_substream *substream,
  947. struct snd_pcm_status32 __user * _status,
  948. bool ext)
  949. {
  950. struct snd_pcm_status64 status64;
  951. struct snd_pcm_status32 status32;
  952. int res;
  953. memset(&status64, 0, sizeof(status64));
  954. memset(&status32, 0, sizeof(status32));
  955. /*
  956. * with extension, parameters are read/write,
  957. * get audio_tstamp_data from user,
  958. * ignore rest of status structure
  959. */
  960. if (ext && get_user(status64.audio_tstamp_data,
  961. (u32 __user *)(&_status->audio_tstamp_data)))
  962. return -EFAULT;
  963. res = snd_pcm_status64(substream, &status64);
  964. if (res < 0)
  965. return res;
  966. status32 = (struct snd_pcm_status32) {
  967. .state = status64.state,
  968. .trigger_tstamp_sec = status64.trigger_tstamp_sec,
  969. .trigger_tstamp_nsec = status64.trigger_tstamp_nsec,
  970. .tstamp_sec = status64.tstamp_sec,
  971. .tstamp_nsec = status64.tstamp_nsec,
  972. .appl_ptr = status64.appl_ptr,
  973. .hw_ptr = status64.hw_ptr,
  974. .delay = status64.delay,
  975. .avail = status64.avail,
  976. .avail_max = status64.avail_max,
  977. .overrange = status64.overrange,
  978. .suspended_state = status64.suspended_state,
  979. .audio_tstamp_data = status64.audio_tstamp_data,
  980. .audio_tstamp_sec = status64.audio_tstamp_sec,
  981. .audio_tstamp_nsec = status64.audio_tstamp_nsec,
   982. .driver_tstamp_sec = status64.driver_tstamp_sec,
   983. .driver_tstamp_nsec = status64.driver_tstamp_nsec,
  984. .audio_tstamp_accuracy = status64.audio_tstamp_accuracy,
  985. };
  986. if (copy_to_user(_status, &status32, sizeof(status32)))
  987. return -EFAULT;
  988. return 0;
  989. }
  990. static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
  991. struct snd_pcm_channel_info * info)
  992. {
  993. struct snd_pcm_runtime *runtime;
  994. unsigned int channel;
  995. channel = info->channel;
  996. runtime = substream->runtime;
  997. snd_pcm_stream_lock_irq(substream);
  998. if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
  999. snd_pcm_stream_unlock_irq(substream);
  1000. return -EBADFD;
  1001. }
  1002. snd_pcm_stream_unlock_irq(substream);
  1003. if (channel >= runtime->channels)
  1004. return -EINVAL;
  1005. memset(info, 0, sizeof(*info));
  1006. info->channel = channel;
  1007. return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
  1008. }
  1009. static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
  1010. struct snd_pcm_channel_info __user * _info)
  1011. {
  1012. struct snd_pcm_channel_info info;
  1013. int res;
  1014. if (copy_from_user(&info, _info, sizeof(info)))
  1015. return -EFAULT;
  1016. res = snd_pcm_channel_info(substream, &info);
  1017. if (res < 0)
  1018. return res;
  1019. if (copy_to_user(_info, &info, sizeof(info)))
  1020. return -EFAULT;
  1021. return 0;
  1022. }
  1023. static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
  1024. {
  1025. struct snd_pcm_runtime *runtime = substream->runtime;
  1026. if (runtime->trigger_master == NULL)
  1027. return;
  1028. if (runtime->trigger_master == substream) {
  1029. if (!runtime->trigger_tstamp_latched)
  1030. snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
  1031. } else {
  1032. snd_pcm_trigger_tstamp(runtime->trigger_master);
  1033. runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
  1034. }
  1035. runtime->trigger_master = NULL;
  1036. }
  1037. #define ACTION_ARG_IGNORE (__force snd_pcm_state_t)0
  1038. struct action_ops {
  1039. int (*pre_action)(struct snd_pcm_substream *substream,
  1040. snd_pcm_state_t state);
  1041. int (*do_action)(struct snd_pcm_substream *substream,
  1042. snd_pcm_state_t state);
  1043. void (*undo_action)(struct snd_pcm_substream *substream,
  1044. snd_pcm_state_t state);
  1045. void (*post_action)(struct snd_pcm_substream *substream,
  1046. snd_pcm_state_t state);
  1047. };
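/*
 * Each trigger path below fills one of these tables and hands it to
 * snd_pcm_action() and friends. As a sketch, the start trigger is wired
 * up roughly like this (the callbacks are defined further below):
 *
 *	static const struct action_ops snd_pcm_action_start = {
 *		.pre_action = snd_pcm_pre_start,
 *		.do_action = snd_pcm_do_start,
 *		.undo_action = snd_pcm_undo_start,
 *		.post_action = snd_pcm_post_start,
 *	};
 */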
  1048. /*
   1049. * This function is the core handler for linked streams.
   1050. * Note: the stream state might be changed even on failure.
   1051. * Note2: call with the calling stream's lock + the link lock held.
  1052. */
  1053. static int snd_pcm_action_group(const struct action_ops *ops,
  1054. struct snd_pcm_substream *substream,
  1055. snd_pcm_state_t state,
  1056. bool stream_lock)
  1057. {
  1058. struct snd_pcm_substream *s = NULL;
  1059. struct snd_pcm_substream *s1;
  1060. int res = 0, depth = 1;
  1061. snd_pcm_group_for_each_entry(s, substream) {
  1062. if (s != substream) {
  1063. if (!stream_lock)
  1064. mutex_lock_nested(&s->runtime->buffer_mutex, depth);
  1065. else if (s->pcm->nonatomic)
  1066. mutex_lock_nested(&s->self_group.mutex, depth);
  1067. else
  1068. spin_lock_nested(&s->self_group.lock, depth);
  1069. depth++;
  1070. }
  1071. res = ops->pre_action(s, state);
  1072. if (res < 0)
  1073. goto _unlock;
  1074. }
  1075. snd_pcm_group_for_each_entry(s, substream) {
  1076. res = ops->do_action(s, state);
  1077. if (res < 0) {
  1078. if (ops->undo_action) {
  1079. snd_pcm_group_for_each_entry(s1, substream) {
  1080. if (s1 == s) /* failed stream */
  1081. break;
  1082. ops->undo_action(s1, state);
  1083. }
  1084. }
  1085. s = NULL; /* unlock all */
  1086. goto _unlock;
  1087. }
  1088. }
  1089. snd_pcm_group_for_each_entry(s, substream) {
  1090. ops->post_action(s, state);
  1091. }
  1092. _unlock:
  1093. /* unlock streams */
  1094. snd_pcm_group_for_each_entry(s1, substream) {
  1095. if (s1 != substream) {
  1096. if (!stream_lock)
  1097. mutex_unlock(&s1->runtime->buffer_mutex);
  1098. else if (s1->pcm->nonatomic)
  1099. mutex_unlock(&s1->self_group.mutex);
  1100. else
  1101. spin_unlock(&s1->self_group.lock);
  1102. }
  1103. if (s1 == s) /* end */
  1104. break;
  1105. }
  1106. return res;
  1107. }
  1108. /*
  1109. * Note: call with stream lock
  1110. */
  1111. static int snd_pcm_action_single(const struct action_ops *ops,
  1112. struct snd_pcm_substream *substream,
  1113. snd_pcm_state_t state)
  1114. {
  1115. int res;
  1116. res = ops->pre_action(substream, state);
  1117. if (res < 0)
  1118. return res;
  1119. res = ops->do_action(substream, state);
  1120. if (res == 0)
  1121. ops->post_action(substream, state);
  1122. else if (ops->undo_action)
  1123. ops->undo_action(substream, state);
  1124. return res;
  1125. }
  1126. static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
  1127. struct snd_pcm_group *new_group)
  1128. {
  1129. substream->group = new_group;
  1130. list_move(&substream->link_list, &new_group->substreams);
  1131. }
  1132. /*
  1133. * Unref and unlock the group, but keep the stream lock;
   1134. * when the group becomes empty and is no longer referenced, it is freed
  1135. */
  1136. static void snd_pcm_group_unref(struct snd_pcm_group *group,
  1137. struct snd_pcm_substream *substream)
  1138. {
  1139. bool do_free;
  1140. if (!group)
  1141. return;
  1142. do_free = refcount_dec_and_test(&group->refs);
  1143. snd_pcm_group_unlock(group, substream->pcm->nonatomic);
  1144. if (do_free)
  1145. kfree(group);
  1146. }
  1147. /*
  1148. * Lock the group inside a stream lock and reference it;
  1149. * return the locked group object, or NULL if not linked
  1150. */
  1151. static struct snd_pcm_group *
  1152. snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
  1153. {
  1154. bool nonatomic = substream->pcm->nonatomic;
  1155. struct snd_pcm_group *group;
  1156. bool trylock;
  1157. for (;;) {
  1158. if (!snd_pcm_stream_linked(substream))
  1159. return NULL;
  1160. group = substream->group;
  1161. /* block freeing the group object */
  1162. refcount_inc(&group->refs);
  1163. trylock = nonatomic ? mutex_trylock(&group->mutex) :
  1164. spin_trylock(&group->lock);
  1165. if (trylock)
  1166. break; /* OK */
1167. /* re-lock to avoid an ABBA deadlock */
  1168. snd_pcm_stream_unlock(substream);
  1169. snd_pcm_group_lock(group, nonatomic);
  1170. snd_pcm_stream_lock(substream);
  1171. /* check the group again; the above opens a small race window */
  1172. if (substream->group == group)
  1173. break; /* OK */
  1174. /* group changed, try again */
  1175. snd_pcm_group_unref(group, substream);
  1176. }
  1177. return group;
  1178. }
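/*
 * Locking-order note (summary of the helpers above and snd_pcm_link()
 * below): the group lock is taken outside the per-substream stream lock,
 * so code that already holds a stream lock may only trylock the group.
 * When the trylock fails, the stream lock is dropped, the group lock is
 * taken in the proper order and the stream lock is re-acquired; the
 * refcount taken beforehand keeps the group object alive across that
 * window, and the group pointer is re-checked afterwards since the
 * substream may have been relinked meanwhile.
 */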
  1179. /*
  1180. * Note: call with stream lock
  1181. */
  1182. static int snd_pcm_action(const struct action_ops *ops,
  1183. struct snd_pcm_substream *substream,
  1184. snd_pcm_state_t state)
  1185. {
  1186. struct snd_pcm_group *group;
  1187. int res;
  1188. group = snd_pcm_stream_group_ref(substream);
  1189. if (group)
  1190. res = snd_pcm_action_group(ops, substream, state, true);
  1191. else
  1192. res = snd_pcm_action_single(ops, substream, state);
  1193. snd_pcm_group_unref(group, substream);
  1194. return res;
  1195. }
  1196. /*
  1197. * Note: don't use any locks before
  1198. */
  1199. static int snd_pcm_action_lock_irq(const struct action_ops *ops,
  1200. struct snd_pcm_substream *substream,
  1201. snd_pcm_state_t state)
  1202. {
  1203. int res;
  1204. snd_pcm_stream_lock_irq(substream);
  1205. res = snd_pcm_action(ops, substream, state);
  1206. snd_pcm_stream_unlock_irq(substream);
  1207. return res;
  1208. }
1209. /* Note: call without any stream locks held; this variant may sleep */
  1211. static int snd_pcm_action_nonatomic(const struct action_ops *ops,
  1212. struct snd_pcm_substream *substream,
  1213. snd_pcm_state_t state)
  1214. {
  1215. int res;
  1216. /* Guarantee the group members won't change during non-atomic action */
  1217. down_read(&snd_pcm_link_rwsem);
  1218. res = snd_pcm_buffer_access_lock(substream->runtime);
  1219. if (res < 0)
  1220. goto unlock;
  1221. if (snd_pcm_stream_linked(substream))
  1222. res = snd_pcm_action_group(ops, substream, state, false);
  1223. else
  1224. res = snd_pcm_action_single(ops, substream, state);
  1225. snd_pcm_buffer_access_unlock(substream->runtime);
  1226. unlock:
  1227. up_read(&snd_pcm_link_rwsem);
  1228. return res;
  1229. }
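/*
 * Dispatch summary: snd_pcm_action() and snd_pcm_action_lock_irq() above
 * are the paths usable from atomic (stream-lock held / IRQ-off) contexts,
 * while snd_pcm_action_nonatomic() serves operations that may sleep (in
 * this file, prepare and reset), serialized against link changes via
 * snd_pcm_link_rwsem and against concurrent buffer accesses.
 */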
  1230. /*
  1231. * start callbacks
  1232. */
  1233. static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
  1234. snd_pcm_state_t state)
  1235. {
  1236. struct snd_pcm_runtime *runtime = substream->runtime;
  1237. if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
  1238. return -EBADFD;
  1239. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
  1240. !snd_pcm_playback_data(substream))
  1241. return -EPIPE;
  1242. runtime->trigger_tstamp_latched = false;
  1243. runtime->trigger_master = substream;
  1244. return 0;
  1245. }
  1246. static int snd_pcm_do_start(struct snd_pcm_substream *substream,
  1247. snd_pcm_state_t state)
  1248. {
  1249. if (substream->runtime->trigger_master != substream)
  1250. return 0;
  1251. return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
  1252. }
  1253. static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
  1254. snd_pcm_state_t state)
  1255. {
  1256. if (substream->runtime->trigger_master == substream)
  1257. substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
  1258. }
  1259. static void snd_pcm_post_start(struct snd_pcm_substream *substream,
  1260. snd_pcm_state_t state)
  1261. {
  1262. struct snd_pcm_runtime *runtime = substream->runtime;
  1263. snd_pcm_trigger_tstamp(substream);
  1264. runtime->hw_ptr_jiffies = jiffies;
  1265. runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
  1266. runtime->rate;
  1267. runtime->status->state = state;
  1268. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
  1269. runtime->silence_size > 0)
  1270. snd_pcm_playback_silence(substream, ULONG_MAX);
  1271. snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
  1272. }
  1273. static const struct action_ops snd_pcm_action_start = {
  1274. .pre_action = snd_pcm_pre_start,
  1275. .do_action = snd_pcm_do_start,
  1276. .undo_action = snd_pcm_undo_start,
  1277. .post_action = snd_pcm_post_start
  1278. };
  1279. /**
  1280. * snd_pcm_start - start all linked streams
  1281. * @substream: the PCM substream instance
  1282. *
  1283. * Return: Zero if successful, or a negative error code.
  1284. * The stream lock must be acquired before calling this function.
  1285. */
  1286. int snd_pcm_start(struct snd_pcm_substream *substream)
  1287. {
  1288. return snd_pcm_action(&snd_pcm_action_start, substream,
  1289. SNDRV_PCM_STATE_RUNNING);
  1290. }
  1291. /* take the stream lock and start the streams */
  1292. static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
  1293. {
  1294. return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
  1295. SNDRV_PCM_STATE_RUNNING);
  1296. }
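/*
 * Illustrative sketch (not part of this file; the foo_* names are made up):
 * the snd_pcm_do_start()/snd_pcm_undo_start() helpers above, and the stop
 * and pause counterparts below, all end up in the driver's .trigger
 * callback with one of the SNDRV_PCM_TRIGGER_* commands.  A minimal,
 * hypothetical callback might look like:
 *
 *	static int foo_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 *	{
 *		struct foo_chip *chip = snd_pcm_substream_chip(substream);
 *
 *		switch (cmd) {
 *		case SNDRV_PCM_TRIGGER_START:
 *		case SNDRV_PCM_TRIGGER_RESUME:
 *		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 *			foo_start_dma(chip, substream);
 *			return 0;
 *		case SNDRV_PCM_TRIGGER_STOP:
 *		case SNDRV_PCM_TRIGGER_SUSPEND:
 *		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 *			foo_stop_dma(chip, substream);
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 * The callback runs with the stream lock held and, for atomic PCMs, must
 * not sleep.
 */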
  1297. /*
  1298. * stop callbacks
  1299. */
  1300. static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
  1301. snd_pcm_state_t state)
  1302. {
  1303. struct snd_pcm_runtime *runtime = substream->runtime;
  1304. if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
  1305. return -EBADFD;
  1306. runtime->trigger_master = substream;
  1307. return 0;
  1308. }
  1309. static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
  1310. snd_pcm_state_t state)
  1311. {
  1312. if (substream->runtime->trigger_master == substream &&
  1313. snd_pcm_running(substream)) {
  1314. substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
  1315. substream->runtime->stop_operating = true;
  1316. }
1317. return 0; /* unconditionally stop all substreams */
  1318. }
  1319. static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
  1320. snd_pcm_state_t state)
  1321. {
  1322. struct snd_pcm_runtime *runtime = substream->runtime;
  1323. if (runtime->status->state != state) {
  1324. snd_pcm_trigger_tstamp(substream);
  1325. runtime->status->state = state;
  1326. snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
  1327. }
  1328. wake_up(&runtime->sleep);
  1329. wake_up(&runtime->tsleep);
  1330. }
  1331. static const struct action_ops snd_pcm_action_stop = {
  1332. .pre_action = snd_pcm_pre_stop,
  1333. .do_action = snd_pcm_do_stop,
  1334. .post_action = snd_pcm_post_stop
  1335. };
  1336. /**
  1337. * snd_pcm_stop - try to stop all running streams in the substream group
  1338. * @substream: the PCM substream instance
  1339. * @state: PCM state after stopping the stream
  1340. *
  1341. * The state of each stream is then changed to the given state unconditionally.
  1342. *
  1343. * Return: Zero if successful, or a negative error code.
  1344. */
  1345. int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
  1346. {
  1347. return snd_pcm_action(&snd_pcm_action_stop, substream, state);
  1348. }
  1349. EXPORT_SYMBOL(snd_pcm_stop);
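/*
 * Usage note (illustrative): a driver typically calls snd_pcm_stop() from
 * its interrupt handler when the hardware reports an unrecoverable error,
 * with the stream lock held as documented above:
 *
 *	snd_pcm_stream_lock_irqsave(substream, flags);
 *	if (snd_pcm_running(substream))
 *		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
 *
 * For exactly this pattern, snd_pcm_stop_xrun() below is the preferred
 * shortcut, since it takes the lock by itself.
 */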
  1350. /**
  1351. * snd_pcm_drain_done - stop the DMA only when the given stream is playback
  1352. * @substream: the PCM substream
  1353. *
  1354. * After stopping, the state is changed to SETUP.
  1355. * Unlike snd_pcm_stop(), this affects only the given stream.
  1356. *
1357. * Return: Zero if successful, or a negative error code.
  1358. */
  1359. int snd_pcm_drain_done(struct snd_pcm_substream *substream)
  1360. {
  1361. return snd_pcm_action_single(&snd_pcm_action_stop, substream,
  1362. SNDRV_PCM_STATE_SETUP);
  1363. }
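/*
 * Usage note (illustrative): snd_pcm_drain_done() is meant for drivers
 * that set SNDRV_PCM_INFO_DRAIN_TRIGGER (see snd_pcm_do_drain_init()
 * below); such a driver receives SNDRV_PCM_TRIGGER_DRAIN, lets the
 * hardware play out the remaining samples, and then calls
 * snd_pcm_drain_done() under the stream lock, typically from its
 * interrupt handler, to move the stream to SETUP.
 */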
  1364. /**
  1365. * snd_pcm_stop_xrun - stop the running streams as XRUN
  1366. * @substream: the PCM substream instance
  1367. *
  1368. * This stops the given running substream (and all linked substreams) as XRUN.
  1369. * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
  1370. *
  1371. * Return: Zero if successful, or a negative error code.
  1372. */
  1373. int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
  1374. {
  1375. unsigned long flags;
  1376. snd_pcm_stream_lock_irqsave(substream, flags);
  1377. if (substream->runtime && snd_pcm_running(substream))
  1378. __snd_pcm_xrun(substream);
  1379. snd_pcm_stream_unlock_irqrestore(substream, flags);
  1380. return 0;
  1381. }
  1382. EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
  1383. /*
  1384. * pause callbacks: pass boolean (to start pause or resume) as state argument
  1385. */
  1386. #define pause_pushed(state) (__force bool)(state)
  1387. static int snd_pcm_pre_pause(struct snd_pcm_substream *substream,
  1388. snd_pcm_state_t state)
  1389. {
  1390. struct snd_pcm_runtime *runtime = substream->runtime;
  1391. if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
  1392. return -ENOSYS;
  1393. if (pause_pushed(state)) {
  1394. if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
  1395. return -EBADFD;
  1396. } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
  1397. return -EBADFD;
  1398. runtime->trigger_master = substream;
  1399. return 0;
  1400. }
  1401. static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
  1402. snd_pcm_state_t state)
  1403. {
  1404. if (substream->runtime->trigger_master != substream)
  1405. return 0;
  1406. /* some drivers might use hw_ptr to recover from the pause -
  1407. update the hw_ptr now */
  1408. if (pause_pushed(state))
  1409. snd_pcm_update_hw_ptr(substream);
1410. /* The jiffies check in snd_pcm_update_hw_ptr*() is done with a
1411. * delta from the current jiffies; setting hw_ptr_jiffies far into
1412. * the past gives a large enough delta to effectively skip the check once.
1413. */
  1414. substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
  1415. return substream->ops->trigger(substream,
  1416. pause_pushed(state) ?
  1417. SNDRV_PCM_TRIGGER_PAUSE_PUSH :
  1418. SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
  1419. }
  1420. static void snd_pcm_undo_pause(struct snd_pcm_substream *substream,
  1421. snd_pcm_state_t state)
  1422. {
  1423. if (substream->runtime->trigger_master == substream)
  1424. substream->ops->trigger(substream,
  1425. pause_pushed(state) ?
  1426. SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
  1427. SNDRV_PCM_TRIGGER_PAUSE_PUSH);
  1428. }
  1429. static void snd_pcm_post_pause(struct snd_pcm_substream *substream,
  1430. snd_pcm_state_t state)
  1431. {
  1432. struct snd_pcm_runtime *runtime = substream->runtime;
  1433. snd_pcm_trigger_tstamp(substream);
  1434. if (pause_pushed(state)) {
  1435. runtime->status->state = SNDRV_PCM_STATE_PAUSED;
  1436. snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
  1437. wake_up(&runtime->sleep);
  1438. wake_up(&runtime->tsleep);
  1439. } else {
  1440. runtime->status->state = SNDRV_PCM_STATE_RUNNING;
  1441. snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
  1442. }
  1443. }
  1444. static const struct action_ops snd_pcm_action_pause = {
  1445. .pre_action = snd_pcm_pre_pause,
  1446. .do_action = snd_pcm_do_pause,
  1447. .undo_action = snd_pcm_undo_pause,
  1448. .post_action = snd_pcm_post_pause
  1449. };
  1450. /*
  1451. * Push/release the pause for all linked streams.
  1452. */
  1453. static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
  1454. {
  1455. return snd_pcm_action(&snd_pcm_action_pause, substream,
  1456. (__force snd_pcm_state_t)push);
  1457. }
  1458. static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream,
  1459. bool push)
  1460. {
  1461. return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream,
  1462. (__force snd_pcm_state_t)push);
  1463. }
  1464. #ifdef CONFIG_PM
  1465. /* suspend callback: state argument ignored */
  1466. static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream,
  1467. snd_pcm_state_t state)
  1468. {
  1469. struct snd_pcm_runtime *runtime = substream->runtime;
  1470. switch (runtime->status->state) {
  1471. case SNDRV_PCM_STATE_SUSPENDED:
  1472. return -EBUSY;
  1473. /* unresumable PCM state; return -EBUSY for skipping suspend */
  1474. case SNDRV_PCM_STATE_OPEN:
  1475. case SNDRV_PCM_STATE_SETUP:
  1476. case SNDRV_PCM_STATE_DISCONNECTED:
  1477. return -EBUSY;
  1478. }
  1479. runtime->trigger_master = substream;
  1480. return 0;
  1481. }
  1482. static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
  1483. snd_pcm_state_t state)
  1484. {
  1485. struct snd_pcm_runtime *runtime = substream->runtime;
  1486. if (runtime->trigger_master != substream)
  1487. return 0;
  1488. if (! snd_pcm_running(substream))
  1489. return 0;
  1490. substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
  1491. runtime->stop_operating = true;
  1492. return 0; /* suspend unconditionally */
  1493. }
  1494. static void snd_pcm_post_suspend(struct snd_pcm_substream *substream,
  1495. snd_pcm_state_t state)
  1496. {
  1497. struct snd_pcm_runtime *runtime = substream->runtime;
  1498. snd_pcm_trigger_tstamp(substream);
  1499. runtime->status->suspended_state = runtime->status->state;
  1500. runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
  1501. snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
  1502. wake_up(&runtime->sleep);
  1503. wake_up(&runtime->tsleep);
  1504. }
  1505. static const struct action_ops snd_pcm_action_suspend = {
  1506. .pre_action = snd_pcm_pre_suspend,
  1507. .do_action = snd_pcm_do_suspend,
  1508. .post_action = snd_pcm_post_suspend
  1509. };
  1510. /*
  1511. * snd_pcm_suspend - trigger SUSPEND to all linked streams
  1512. * @substream: the PCM substream
  1513. *
  1514. * After this call, all streams are changed to SUSPENDED state.
  1515. *
  1516. * Return: Zero if successful, or a negative error code.
  1517. */
  1518. static int snd_pcm_suspend(struct snd_pcm_substream *substream)
  1519. {
  1520. int err;
  1521. unsigned long flags;
  1522. snd_pcm_stream_lock_irqsave(substream, flags);
  1523. err = snd_pcm_action(&snd_pcm_action_suspend, substream,
  1524. ACTION_ARG_IGNORE);
  1525. snd_pcm_stream_unlock_irqrestore(substream, flags);
  1526. return err;
  1527. }
  1528. /**
  1529. * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
  1530. * @pcm: the PCM instance
  1531. *
  1532. * After this call, all streams are changed to SUSPENDED state.
  1533. *
  1534. * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
  1535. */
  1536. int snd_pcm_suspend_all(struct snd_pcm *pcm)
  1537. {
  1538. struct snd_pcm_substream *substream;
  1539. int stream, err = 0;
  1540. if (! pcm)
  1541. return 0;
  1542. for (stream = 0; stream < 2; stream++) {
  1543. for (substream = pcm->streams[stream].substream;
  1544. substream; substream = substream->next) {
  1545. /* FIXME: the open/close code should lock this as well */
  1546. if (substream->runtime == NULL)
  1547. continue;
  1548. /*
1549. * Skip BE DAI link PCMs that are internal and may
  1550. * not have their substream ops set.
  1551. */
  1552. if (!substream->ops)
  1553. continue;
  1554. err = snd_pcm_suspend(substream);
  1555. if (err < 0 && err != -EBUSY)
  1556. return err;
  1557. }
  1558. }
  1559. for (stream = 0; stream < 2; stream++)
  1560. for (substream = pcm->streams[stream].substream;
  1561. substream; substream = substream->next)
  1562. snd_pcm_sync_stop(substream, false);
  1563. return 0;
  1564. }
  1565. EXPORT_SYMBOL(snd_pcm_suspend_all);
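/*
 * Usage sketch (illustrative; the foo_* names are made up): a driver's
 * system-suspend callback typically suspends its PCM streams before
 * powering the hardware down, e.g.:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *
 *		snd_pcm_suspend_all(chip->pcm);
 *		foo_save_registers(chip);
 *		return 0;
 *	}
 *
 * -EBUSY results for already-suspended or not-yet-set-up substreams are
 * swallowed inside snd_pcm_suspend_all() itself.
 */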
  1566. /* resume callbacks: state argument ignored */
  1567. static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
  1568. snd_pcm_state_t state)
  1569. {
  1570. struct snd_pcm_runtime *runtime = substream->runtime;
  1571. if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
  1572. return -ENOSYS;
  1573. runtime->trigger_master = substream;
  1574. return 0;
  1575. }
  1576. static int snd_pcm_do_resume(struct snd_pcm_substream *substream,
  1577. snd_pcm_state_t state)
  1578. {
  1579. struct snd_pcm_runtime *runtime = substream->runtime;
  1580. if (runtime->trigger_master != substream)
  1581. return 0;
  1582. /* DMA not running previously? */
  1583. if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
  1584. (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
  1585. substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
  1586. return 0;
  1587. return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
  1588. }
  1589. static void snd_pcm_undo_resume(struct snd_pcm_substream *substream,
  1590. snd_pcm_state_t state)
  1591. {
  1592. if (substream->runtime->trigger_master == substream &&
  1593. snd_pcm_running(substream))
  1594. substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
  1595. }
  1596. static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
  1597. snd_pcm_state_t state)
  1598. {
  1599. struct snd_pcm_runtime *runtime = substream->runtime;
  1600. snd_pcm_trigger_tstamp(substream);
  1601. runtime->status->state = runtime->status->suspended_state;
  1602. snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
  1603. }
  1604. static const struct action_ops snd_pcm_action_resume = {
  1605. .pre_action = snd_pcm_pre_resume,
  1606. .do_action = snd_pcm_do_resume,
  1607. .undo_action = snd_pcm_undo_resume,
  1608. .post_action = snd_pcm_post_resume
  1609. };
  1610. static int snd_pcm_resume(struct snd_pcm_substream *substream)
  1611. {
  1612. return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream,
  1613. ACTION_ARG_IGNORE);
  1614. }
  1615. #else
  1616. static int snd_pcm_resume(struct snd_pcm_substream *substream)
  1617. {
  1618. return -ENOSYS;
  1619. }
  1620. #endif /* CONFIG_PM */
  1621. /*
  1622. * xrun ioctl
  1623. *
  1624. * Change the RUNNING stream(s) to XRUN state.
  1625. */
  1626. static int snd_pcm_xrun(struct snd_pcm_substream *substream)
  1627. {
  1628. struct snd_pcm_runtime *runtime = substream->runtime;
  1629. int result;
  1630. snd_pcm_stream_lock_irq(substream);
  1631. switch (runtime->status->state) {
  1632. case SNDRV_PCM_STATE_XRUN:
  1633. result = 0; /* already there */
  1634. break;
  1635. case SNDRV_PCM_STATE_RUNNING:
  1636. __snd_pcm_xrun(substream);
  1637. result = 0;
  1638. break;
  1639. default:
  1640. result = -EBADFD;
  1641. }
  1642. snd_pcm_stream_unlock_irq(substream);
  1643. return result;
  1644. }
  1645. /*
  1646. * reset ioctl
  1647. */
  1648. /* reset callbacks: state argument ignored */
  1649. static int snd_pcm_pre_reset(struct snd_pcm_substream *substream,
  1650. snd_pcm_state_t state)
  1651. {
  1652. struct snd_pcm_runtime *runtime = substream->runtime;
  1653. switch (runtime->status->state) {
  1654. case SNDRV_PCM_STATE_RUNNING:
  1655. case SNDRV_PCM_STATE_PREPARED:
  1656. case SNDRV_PCM_STATE_PAUSED:
  1657. case SNDRV_PCM_STATE_SUSPENDED:
  1658. return 0;
  1659. default:
  1660. return -EBADFD;
  1661. }
  1662. }
  1663. static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
  1664. snd_pcm_state_t state)
  1665. {
  1666. struct snd_pcm_runtime *runtime = substream->runtime;
  1667. int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
  1668. if (err < 0)
  1669. return err;
  1670. snd_pcm_stream_lock_irq(substream);
  1671. runtime->hw_ptr_base = 0;
  1672. runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
  1673. runtime->status->hw_ptr % runtime->period_size;
  1674. runtime->silence_start = runtime->status->hw_ptr;
  1675. runtime->silence_filled = 0;
  1676. snd_pcm_stream_unlock_irq(substream);
  1677. return 0;
  1678. }
  1679. static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
  1680. snd_pcm_state_t state)
  1681. {
  1682. struct snd_pcm_runtime *runtime = substream->runtime;
  1683. snd_pcm_stream_lock_irq(substream);
  1684. runtime->control->appl_ptr = runtime->status->hw_ptr;
  1685. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
  1686. runtime->silence_size > 0)
  1687. snd_pcm_playback_silence(substream, ULONG_MAX);
  1688. snd_pcm_stream_unlock_irq(substream);
  1689. }
  1690. static const struct action_ops snd_pcm_action_reset = {
  1691. .pre_action = snd_pcm_pre_reset,
  1692. .do_action = snd_pcm_do_reset,
  1693. .post_action = snd_pcm_post_reset
  1694. };
  1695. static int snd_pcm_reset(struct snd_pcm_substream *substream)
  1696. {
  1697. return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream,
  1698. ACTION_ARG_IGNORE);
  1699. }
  1700. /*
  1701. * prepare ioctl
  1702. */
  1703. /* pass f_flags as state argument */
  1704. static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
  1705. snd_pcm_state_t state)
  1706. {
  1707. struct snd_pcm_runtime *runtime = substream->runtime;
  1708. int f_flags = (__force int)state;
  1709. if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
  1710. runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
  1711. return -EBADFD;
  1712. if (snd_pcm_running(substream))
  1713. return -EBUSY;
  1714. substream->f_flags = f_flags;
  1715. return 0;
  1716. }
  1717. static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
  1718. snd_pcm_state_t state)
  1719. {
  1720. int err;
  1721. snd_pcm_sync_stop(substream, true);
  1722. err = substream->ops->prepare(substream);
  1723. if (err < 0)
  1724. return err;
  1725. return snd_pcm_do_reset(substream, state);
  1726. }
  1727. static void snd_pcm_post_prepare(struct snd_pcm_substream *substream,
  1728. snd_pcm_state_t state)
  1729. {
  1730. struct snd_pcm_runtime *runtime = substream->runtime;
  1731. runtime->control->appl_ptr = runtime->status->hw_ptr;
  1732. snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
  1733. }
  1734. static const struct action_ops snd_pcm_action_prepare = {
  1735. .pre_action = snd_pcm_pre_prepare,
  1736. .do_action = snd_pcm_do_prepare,
  1737. .post_action = snd_pcm_post_prepare
  1738. };
  1739. /**
  1740. * snd_pcm_prepare - prepare the PCM substream to be triggerable
  1741. * @substream: the PCM substream instance
1742. * @file: file to refer to for f_flags
  1743. *
  1744. * Return: Zero if successful, or a negative error code.
  1745. */
  1746. static int snd_pcm_prepare(struct snd_pcm_substream *substream,
  1747. struct file *file)
  1748. {
  1749. int f_flags;
  1750. if (file)
  1751. f_flags = file->f_flags;
  1752. else
  1753. f_flags = substream->f_flags;
  1754. snd_pcm_stream_lock_irq(substream);
  1755. switch (substream->runtime->status->state) {
  1756. case SNDRV_PCM_STATE_PAUSED:
  1757. snd_pcm_pause(substream, false);
  1758. fallthrough;
  1759. case SNDRV_PCM_STATE_SUSPENDED:
  1760. snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
  1761. break;
  1762. }
  1763. snd_pcm_stream_unlock_irq(substream);
  1764. return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
  1765. substream,
  1766. (__force snd_pcm_state_t)f_flags);
  1767. }
  1768. /*
  1769. * drain ioctl
  1770. */
  1771. /* drain init callbacks: state argument ignored */
  1772. static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream,
  1773. snd_pcm_state_t state)
  1774. {
  1775. struct snd_pcm_runtime *runtime = substream->runtime;
  1776. switch (runtime->status->state) {
  1777. case SNDRV_PCM_STATE_OPEN:
  1778. case SNDRV_PCM_STATE_DISCONNECTED:
  1779. case SNDRV_PCM_STATE_SUSPENDED:
  1780. return -EBADFD;
  1781. }
  1782. runtime->trigger_master = substream;
  1783. return 0;
  1784. }
  1785. static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream,
  1786. snd_pcm_state_t state)
  1787. {
  1788. struct snd_pcm_runtime *runtime = substream->runtime;
  1789. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
  1790. switch (runtime->status->state) {
  1791. case SNDRV_PCM_STATE_PREPARED:
  1792. /* start playback stream if possible */
  1793. if (! snd_pcm_playback_empty(substream)) {
  1794. snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
  1795. snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
  1796. } else {
  1797. runtime->status->state = SNDRV_PCM_STATE_SETUP;
  1798. }
  1799. break;
  1800. case SNDRV_PCM_STATE_RUNNING:
  1801. runtime->status->state = SNDRV_PCM_STATE_DRAINING;
  1802. break;
  1803. case SNDRV_PCM_STATE_XRUN:
  1804. runtime->status->state = SNDRV_PCM_STATE_SETUP;
  1805. break;
  1806. default:
  1807. break;
  1808. }
  1809. } else {
  1810. /* stop running stream */
  1811. if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
  1812. snd_pcm_state_t new_state;
  1813. new_state = snd_pcm_capture_avail(runtime) > 0 ?
  1814. SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
  1815. snd_pcm_do_stop(substream, new_state);
  1816. snd_pcm_post_stop(substream, new_state);
  1817. }
  1818. }
  1819. if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
  1820. runtime->trigger_master == substream &&
  1821. (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
  1822. return substream->ops->trigger(substream,
  1823. SNDRV_PCM_TRIGGER_DRAIN);
  1824. return 0;
  1825. }
  1826. static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream,
  1827. snd_pcm_state_t state)
  1828. {
  1829. }
  1830. static const struct action_ops snd_pcm_action_drain_init = {
  1831. .pre_action = snd_pcm_pre_drain_init,
  1832. .do_action = snd_pcm_do_drain_init,
  1833. .post_action = snd_pcm_post_drain_init
  1834. };
  1835. /*
  1836. * Drain the stream(s).
  1837. * When the substream is linked, sync until the draining of all playback streams
  1838. * is finished.
1839. * After this call, all streams are supposed to be in either SETUP or
1840. * DRAINING (capture only) state.
  1841. */
  1842. static int snd_pcm_drain(struct snd_pcm_substream *substream,
  1843. struct file *file)
  1844. {
  1845. struct snd_card *card;
  1846. struct snd_pcm_runtime *runtime;
  1847. struct snd_pcm_substream *s;
  1848. struct snd_pcm_group *group;
  1849. wait_queue_entry_t wait;
  1850. int result = 0;
  1851. int nonblock = 0;
  1852. card = substream->pcm->card;
  1853. runtime = substream->runtime;
  1854. if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
  1855. return -EBADFD;
  1856. if (file) {
  1857. if (file->f_flags & O_NONBLOCK)
  1858. nonblock = 1;
  1859. } else if (substream->f_flags & O_NONBLOCK)
  1860. nonblock = 1;
  1861. snd_pcm_stream_lock_irq(substream);
  1862. /* resume pause */
  1863. if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
  1864. snd_pcm_pause(substream, false);
  1865. /* pre-start/stop - all running streams are changed to DRAINING state */
  1866. result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
  1867. ACTION_ARG_IGNORE);
  1868. if (result < 0)
  1869. goto unlock;
  1870. /* in non-blocking, we don't wait in ioctl but let caller poll */
  1871. if (nonblock) {
  1872. result = -EAGAIN;
  1873. goto unlock;
  1874. }
  1875. for (;;) {
  1876. long tout;
  1877. struct snd_pcm_runtime *to_check;
  1878. if (signal_pending(current)) {
  1879. result = -ERESTARTSYS;
  1880. break;
  1881. }
  1882. /* find a substream to drain */
  1883. to_check = NULL;
  1884. group = snd_pcm_stream_group_ref(substream);
  1885. snd_pcm_group_for_each_entry(s, substream) {
  1886. if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
  1887. continue;
  1888. runtime = s->runtime;
  1889. if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
  1890. to_check = runtime;
  1891. break;
  1892. }
  1893. }
  1894. snd_pcm_group_unref(group, substream);
  1895. if (!to_check)
  1896. break; /* all drained */
  1897. init_waitqueue_entry(&wait, current);
  1898. set_current_state(TASK_INTERRUPTIBLE);
  1899. add_wait_queue(&to_check->sleep, &wait);
  1900. snd_pcm_stream_unlock_irq(substream);
  1901. if (runtime->no_period_wakeup)
  1902. tout = MAX_SCHEDULE_TIMEOUT;
  1903. else {
  1904. tout = 10;
  1905. if (runtime->rate) {
  1906. long t = runtime->period_size * 2 / runtime->rate;
  1907. tout = max(t, tout);
  1908. }
  1909. tout = msecs_to_jiffies(tout * 1000);
  1910. }
  1911. tout = schedule_timeout(tout);
  1912. snd_pcm_stream_lock_irq(substream);
  1913. group = snd_pcm_stream_group_ref(substream);
  1914. snd_pcm_group_for_each_entry(s, substream) {
  1915. if (s->runtime == to_check) {
  1916. remove_wait_queue(&to_check->sleep, &wait);
  1917. break;
  1918. }
  1919. }
  1920. snd_pcm_group_unref(group, substream);
  1921. if (card->shutdown) {
  1922. result = -ENODEV;
  1923. break;
  1924. }
  1925. if (tout == 0) {
  1926. if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
  1927. result = -ESTRPIPE;
  1928. else {
  1929. dev_dbg(substream->pcm->card->dev,
  1930. "playback drain error (DMA or IRQ trouble?)\n");
  1931. snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
  1932. result = -EIO;
  1933. }
  1934. break;
  1935. }
  1936. }
  1937. unlock:
  1938. snd_pcm_stream_unlock_irq(substream);
  1939. return result;
  1940. }
  1941. /*
  1942. * drop ioctl
  1943. *
  1944. * Immediately put all linked substreams into SETUP state.
  1945. */
  1946. static int snd_pcm_drop(struct snd_pcm_substream *substream)
  1947. {
  1948. struct snd_pcm_runtime *runtime;
  1949. int result = 0;
  1950. if (PCM_RUNTIME_CHECK(substream))
  1951. return -ENXIO;
  1952. runtime = substream->runtime;
  1953. if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
  1954. runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
  1955. return -EBADFD;
  1956. snd_pcm_stream_lock_irq(substream);
  1957. /* resume pause */
  1958. if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
  1959. snd_pcm_pause(substream, false);
  1960. snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
  1961. /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
  1962. snd_pcm_stream_unlock_irq(substream);
  1963. return result;
  1964. }
  1965. static bool is_pcm_file(struct file *file)
  1966. {
  1967. struct inode *inode = file_inode(file);
  1968. struct snd_pcm *pcm;
  1969. unsigned int minor;
  1970. if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
  1971. return false;
  1972. minor = iminor(inode);
  1973. pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
  1974. if (!pcm)
  1975. pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
  1976. if (!pcm)
  1977. return false;
  1978. snd_card_unref(pcm->card);
  1979. return true;
  1980. }
  1981. /*
  1982. * PCM link handling
  1983. */
  1984. static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
  1985. {
  1986. int res = 0;
  1987. struct snd_pcm_file *pcm_file;
  1988. struct snd_pcm_substream *substream1;
  1989. struct snd_pcm_group *group, *target_group;
  1990. bool nonatomic = substream->pcm->nonatomic;
  1991. struct fd f = fdget(fd);
  1992. if (!f.file)
  1993. return -EBADFD;
  1994. if (!is_pcm_file(f.file)) {
  1995. res = -EBADFD;
  1996. goto _badf;
  1997. }
  1998. pcm_file = f.file->private_data;
  1999. substream1 = pcm_file->substream;
  2000. if (substream == substream1) {
  2001. res = -EINVAL;
  2002. goto _badf;
  2003. }
  2004. group = kzalloc(sizeof(*group), GFP_KERNEL);
  2005. if (!group) {
  2006. res = -ENOMEM;
  2007. goto _nolock;
  2008. }
  2009. snd_pcm_group_init(group);
  2010. down_write(&snd_pcm_link_rwsem);
  2011. if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
  2012. substream->runtime->status->state != substream1->runtime->status->state ||
  2013. substream->pcm->nonatomic != substream1->pcm->nonatomic) {
  2014. res = -EBADFD;
  2015. goto _end;
  2016. }
  2017. if (snd_pcm_stream_linked(substream1)) {
  2018. res = -EALREADY;
  2019. goto _end;
  2020. }
  2021. snd_pcm_stream_lock_irq(substream);
  2022. if (!snd_pcm_stream_linked(substream)) {
  2023. snd_pcm_group_assign(substream, group);
  2024. group = NULL; /* assigned, don't free this one below */
  2025. }
  2026. target_group = substream->group;
  2027. snd_pcm_stream_unlock_irq(substream);
  2028. snd_pcm_group_lock_irq(target_group, nonatomic);
  2029. snd_pcm_stream_lock_nested(substream1);
  2030. snd_pcm_group_assign(substream1, target_group);
  2031. refcount_inc(&target_group->refs);
  2032. snd_pcm_stream_unlock(substream1);
  2033. snd_pcm_group_unlock_irq(target_group, nonatomic);
  2034. _end:
  2035. up_write(&snd_pcm_link_rwsem);
  2036. _nolock:
  2037. kfree(group);
  2038. _badf:
  2039. fdput(f);
  2040. return res;
  2041. }
  2042. static void relink_to_local(struct snd_pcm_substream *substream)
  2043. {
  2044. snd_pcm_stream_lock_nested(substream);
  2045. snd_pcm_group_assign(substream, &substream->self_group);
  2046. snd_pcm_stream_unlock(substream);
  2047. }
  2048. static int snd_pcm_unlink(struct snd_pcm_substream *substream)
  2049. {
  2050. struct snd_pcm_group *group;
  2051. bool nonatomic = substream->pcm->nonatomic;
  2052. bool do_free = false;
  2053. int res = 0;
  2054. down_write(&snd_pcm_link_rwsem);
  2055. if (!snd_pcm_stream_linked(substream)) {
  2056. res = -EALREADY;
  2057. goto _end;
  2058. }
  2059. group = substream->group;
  2060. snd_pcm_group_lock_irq(group, nonatomic);
  2061. relink_to_local(substream);
  2062. refcount_dec(&group->refs);
  2063. /* detach the last stream, too */
  2064. if (list_is_singular(&group->substreams)) {
  2065. relink_to_local(list_first_entry(&group->substreams,
  2066. struct snd_pcm_substream,
  2067. link_list));
  2068. do_free = refcount_dec_and_test(&group->refs);
  2069. }
  2070. snd_pcm_group_unlock_irq(group, nonatomic);
  2071. if (do_free)
  2072. kfree(group);
  2073. _end:
  2074. up_write(&snd_pcm_link_rwsem);
  2075. return res;
  2076. }
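/*
 * Group lifetime (summary of the two functions above): snd_pcm_link()
 * either creates a fresh group for the first pair of substreams or
 * attaches the new member to the caller's existing group;
 * snd_pcm_unlink() detaches the caller and, when only one member would
 * remain, dissolves the group entirely and frees it once the last
 * reference is dropped.
 */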
  2077. /*
  2078. * hw configurator
  2079. */
  2080. static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
  2081. struct snd_pcm_hw_rule *rule)
  2082. {
  2083. struct snd_interval t;
  2084. snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
  2085. hw_param_interval_c(params, rule->deps[1]), &t);
  2086. return snd_interval_refine(hw_param_interval(params, rule->var), &t);
  2087. }
  2088. static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
  2089. struct snd_pcm_hw_rule *rule)
  2090. {
  2091. struct snd_interval t;
  2092. snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
  2093. hw_param_interval_c(params, rule->deps[1]), &t);
  2094. return snd_interval_refine(hw_param_interval(params, rule->var), &t);
  2095. }
  2096. static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
  2097. struct snd_pcm_hw_rule *rule)
  2098. {
  2099. struct snd_interval t;
  2100. snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
  2101. hw_param_interval_c(params, rule->deps[1]),
  2102. (unsigned long) rule->private, &t);
  2103. return snd_interval_refine(hw_param_interval(params, rule->var), &t);
  2104. }
  2105. static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
  2106. struct snd_pcm_hw_rule *rule)
  2107. {
  2108. struct snd_interval t;
  2109. snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
  2110. (unsigned long) rule->private,
  2111. hw_param_interval_c(params, rule->deps[1]), &t);
  2112. return snd_interval_refine(hw_param_interval(params, rule->var), &t);
  2113. }
  2114. static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
  2115. struct snd_pcm_hw_rule *rule)
  2116. {
  2117. snd_pcm_format_t k;
  2118. const struct snd_interval *i =
  2119. hw_param_interval_c(params, rule->deps[0]);
  2120. struct snd_mask m;
  2121. struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
  2122. snd_mask_any(&m);
  2123. pcm_for_each_format(k) {
  2124. int bits;
  2125. if (!snd_mask_test_format(mask, k))
  2126. continue;
  2127. bits = snd_pcm_format_physical_width(k);
  2128. if (bits <= 0)
  2129. continue; /* ignore invalid formats */
  2130. if ((unsigned)bits < i->min || (unsigned)bits > i->max)
  2131. snd_mask_reset(&m, (__force unsigned)k);
  2132. }
  2133. return snd_mask_refine(mask, &m);
  2134. }
  2135. static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
  2136. struct snd_pcm_hw_rule *rule)
  2137. {
  2138. struct snd_interval t;
  2139. snd_pcm_format_t k;
  2140. t.min = UINT_MAX;
  2141. t.max = 0;
  2142. t.openmin = 0;
  2143. t.openmax = 0;
  2144. pcm_for_each_format(k) {
  2145. int bits;
  2146. if (!snd_mask_test_format(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
  2147. continue;
  2148. bits = snd_pcm_format_physical_width(k);
  2149. if (bits <= 0)
  2150. continue; /* ignore invalid formats */
  2151. if (t.min > (unsigned)bits)
  2152. t.min = bits;
  2153. if (t.max < (unsigned)bits)
  2154. t.max = bits;
  2155. }
  2156. t.integer = 1;
  2157. return snd_interval_refine(hw_param_interval(params, rule->var), &t);
  2158. }
  2159. #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
  2160. #error "Change this table"
  2161. #endif
  2162. static const unsigned int rates[] = {
  2163. 5512, 8000, 11025, 16000, 22050, 32000, 44100,
  2164. 48000, 64000, 88200, 96000, 176400, 192000, 352800, 384000
  2165. };
  2166. const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
  2167. .count = ARRAY_SIZE(rates),
  2168. .list = rates,
  2169. };
  2170. static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
  2171. struct snd_pcm_hw_rule *rule)
  2172. {
  2173. struct snd_pcm_hardware *hw = rule->private;
  2174. return snd_interval_list(hw_param_interval(params, rule->var),
  2175. snd_pcm_known_rates.count,
  2176. snd_pcm_known_rates.list, hw->rates);
  2177. }
  2178. static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
  2179. struct snd_pcm_hw_rule *rule)
  2180. {
  2181. struct snd_interval t;
  2182. struct snd_pcm_substream *substream = rule->private;
  2183. t.min = 0;
  2184. t.max = substream->buffer_bytes_max;
  2185. t.openmin = 0;
  2186. t.openmax = 0;
  2187. t.integer = 1;
  2188. return snd_interval_refine(hw_param_interval(params, rule->var), &t);
  2189. }
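/*
 * Illustrative sketch (not part of this file; the foo_* names are made up):
 * drivers can hook their own constraints into the same rule engine from
 * their .open callback with snd_pcm_hw_rule_add(), using the same function
 * signature as the generic rules below.  For example, a rule keeping the
 * period size a multiple of a hardware burst length of 32 frames might
 * look like:
 *
 *	static int foo_rule_period_size(struct snd_pcm_hw_params *params,
 *					struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *i =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
 *		struct snd_interval t = *i;
 *
 *		t.min = roundup(t.min, 32);
 *		t.max = rounddown(t.max, 32);
 *		t.integer = 1;
 *		return snd_interval_refine(i, &t);
 *	}
 *
 *	... and in foo_pcm_open():
 *	snd_pcm_hw_rule_add(substream->runtime, 0,
 *			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
 *			    foo_rule_period_size, NULL,
 *			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
 */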
  2190. static int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
  2191. {
  2192. struct snd_pcm_runtime *runtime = substream->runtime;
  2193. struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
  2194. int k, err;
  2195. for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
  2196. snd_mask_any(constrs_mask(constrs, k));
  2197. }
  2198. for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
  2199. snd_interval_any(constrs_interval(constrs, k));
  2200. }
  2201. snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
  2202. snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
  2203. snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
  2204. snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
  2205. snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
  2206. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
  2207. snd_pcm_hw_rule_format, NULL,
  2208. SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
  2209. if (err < 0)
  2210. return err;
  2211. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
  2212. snd_pcm_hw_rule_sample_bits, NULL,
  2213. SNDRV_PCM_HW_PARAM_FORMAT,
  2214. SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
  2215. if (err < 0)
  2216. return err;
  2217. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
  2218. snd_pcm_hw_rule_div, NULL,
  2219. SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
  2220. if (err < 0)
  2221. return err;
  2222. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
  2223. snd_pcm_hw_rule_mul, NULL,
  2224. SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
  2225. if (err < 0)
  2226. return err;
  2227. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
  2228. snd_pcm_hw_rule_mulkdiv, (void*) 8,
  2229. SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
  2230. if (err < 0)
  2231. return err;
  2232. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
  2233. snd_pcm_hw_rule_mulkdiv, (void*) 8,
  2234. SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
  2235. if (err < 0)
  2236. return err;
  2237. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
  2238. snd_pcm_hw_rule_div, NULL,
  2239. SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
  2240. if (err < 0)
  2241. return err;
  2242. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
  2243. snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
  2244. SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
  2245. if (err < 0)
  2246. return err;
  2247. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
  2248. snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
  2249. SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
  2250. if (err < 0)
  2251. return err;
  2252. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
  2253. snd_pcm_hw_rule_div, NULL,
  2254. SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
  2255. if (err < 0)
  2256. return err;
  2257. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
  2258. snd_pcm_hw_rule_div, NULL,
  2259. SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
  2260. if (err < 0)
  2261. return err;
  2262. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
  2263. snd_pcm_hw_rule_mulkdiv, (void*) 8,
  2264. SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
  2265. if (err < 0)
  2266. return err;
  2267. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
  2268. snd_pcm_hw_rule_muldivk, (void*) 1000000,
  2269. SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
  2270. if (err < 0)
  2271. return err;
  2272. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
  2273. snd_pcm_hw_rule_mul, NULL,
  2274. SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
  2275. if (err < 0)
  2276. return err;
  2277. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
  2278. snd_pcm_hw_rule_mulkdiv, (void*) 8,
  2279. SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
  2280. if (err < 0)
  2281. return err;
  2282. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
  2283. snd_pcm_hw_rule_muldivk, (void*) 1000000,
  2284. SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
  2285. if (err < 0)
  2286. return err;
  2287. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
  2288. snd_pcm_hw_rule_muldivk, (void*) 8,
  2289. SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
  2290. if (err < 0)
  2291. return err;
  2292. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
  2293. snd_pcm_hw_rule_muldivk, (void*) 8,
  2294. SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
  2295. if (err < 0)
  2296. return err;
  2297. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
  2298. snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
  2299. SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
  2300. if (err < 0)
  2301. return err;
  2302. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
  2303. snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
  2304. SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
  2305. if (err < 0)
  2306. return err;
  2307. return 0;
  2308. }
  2309. static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
  2310. {
  2311. struct snd_pcm_runtime *runtime = substream->runtime;
  2312. struct snd_pcm_hardware *hw = &runtime->hw;
  2313. int err;
  2314. unsigned int mask = 0;
  2315. if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
  2316. mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_INTERLEAVED);
  2317. if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
  2318. mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
  2319. if (hw_support_mmap(substream)) {
  2320. if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
  2321. mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
  2322. if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
  2323. mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED);
  2324. if (hw->info & SNDRV_PCM_INFO_COMPLEX)
  2325. mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_COMPLEX);
  2326. }
  2327. err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
  2328. if (err < 0)
  2329. return err;
  2330. err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
  2331. if (err < 0)
  2332. return err;
  2333. err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT,
  2334. PARAM_MASK_BIT(SNDRV_PCM_SUBFORMAT_STD));
  2335. if (err < 0)
  2336. return err;
  2337. err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
  2338. hw->channels_min, hw->channels_max);
  2339. if (err < 0)
  2340. return err;
  2341. err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
  2342. hw->rate_min, hw->rate_max);
  2343. if (err < 0)
  2344. return err;
  2345. err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
  2346. hw->period_bytes_min, hw->period_bytes_max);
  2347. if (err < 0)
  2348. return err;
  2349. err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
  2350. hw->periods_min, hw->periods_max);
  2351. if (err < 0)
  2352. return err;
  2353. err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
  2354. hw->period_bytes_min, hw->buffer_bytes_max);
  2355. if (err < 0)
  2356. return err;
  2357. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
  2358. snd_pcm_hw_rule_buffer_bytes_max, substream,
  2359. SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
  2360. if (err < 0)
  2361. return err;
  2362. /* FIXME: remove */
  2363. if (runtime->dma_bytes) {
  2364. err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
  2365. if (err < 0)
  2366. return err;
  2367. }
  2368. if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
  2369. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
  2370. snd_pcm_hw_rule_rate, hw,
  2371. SNDRV_PCM_HW_PARAM_RATE, -1);
  2372. if (err < 0)
  2373. return err;
  2374. }
2375. /* FIXME: this belongs to the lowlevel driver */
  2376. snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
  2377. return 0;
  2378. }
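/*
 * Illustrative sketch (made-up values): the hw fields consumed above come
 * from the snd_pcm_hardware descriptor that a driver installs in its
 * .open callback via runtime->hw, e.g.:
 *
 *	static const struct snd_pcm_hardware foo_pcm_hw = {
 *		.info			= SNDRV_PCM_INFO_INTERLEAVED |
 *					  SNDRV_PCM_INFO_MMAP |
 *					  SNDRV_PCM_INFO_MMAP_VALID,
 *		.formats		= SNDRV_PCM_FMTBIT_S16_LE,
 *		.rates			= SNDRV_PCM_RATE_48000,
 *		.rate_min		= 48000,
 *		.rate_max		= 48000,
 *		.channels_min		= 2,
 *		.channels_max		= 2,
 *		.buffer_bytes_max	= 64 * 1024,
 *		.period_bytes_min	= 256,
 *		.period_bytes_max	= 16 * 1024,
 *		.periods_min		= 2,
 *		.periods_max		= 32,
 *	};
 */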
  2379. static void pcm_release_private(struct snd_pcm_substream *substream)
  2380. {
  2381. if (snd_pcm_stream_linked(substream))
  2382. snd_pcm_unlink(substream);
  2383. }
  2384. void snd_pcm_release_substream(struct snd_pcm_substream *substream)
  2385. {
  2386. substream->ref_count--;
  2387. if (substream->ref_count > 0)
  2388. return;
  2389. snd_pcm_drop(substream);
  2390. if (substream->hw_opened) {
  2391. if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
  2392. do_hw_free(substream);
  2393. substream->ops->close(substream);
  2394. substream->hw_opened = 0;
  2395. }
  2396. if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
  2397. cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
  2398. if (substream->pcm_release) {
  2399. substream->pcm_release(substream);
  2400. substream->pcm_release = NULL;
  2401. }
  2402. snd_pcm_detach_substream(substream);
  2403. }
  2404. EXPORT_SYMBOL(snd_pcm_release_substream);
  2405. int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
  2406. struct file *file,
  2407. struct snd_pcm_substream **rsubstream)
  2408. {
  2409. struct snd_pcm_substream *substream;
  2410. int err;
  2411. err = snd_pcm_attach_substream(pcm, stream, file, &substream);
  2412. if (err < 0)
  2413. return err;
  2414. if (substream->ref_count > 1) {
  2415. *rsubstream = substream;
  2416. return 0;
  2417. }
  2418. err = snd_pcm_hw_constraints_init(substream);
  2419. if (err < 0) {
  2420. pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
  2421. goto error;
  2422. }
  2423. if ((err = substream->ops->open(substream)) < 0)
  2424. goto error;
  2425. substream->hw_opened = 1;
  2426. err = snd_pcm_hw_constraints_complete(substream);
  2427. if (err < 0) {
  2428. pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
  2429. goto error;
  2430. }
  2431. *rsubstream = substream;
  2432. return 0;
  2433. error:
  2434. snd_pcm_release_substream(substream);
  2435. return err;
  2436. }
  2437. EXPORT_SYMBOL(snd_pcm_open_substream);
  2438. static int snd_pcm_open_file(struct file *file,
  2439. struct snd_pcm *pcm,
  2440. int stream)
  2441. {
  2442. struct snd_pcm_file *pcm_file;
  2443. struct snd_pcm_substream *substream;
  2444. int err;
  2445. err = snd_pcm_open_substream(pcm, stream, file, &substream);
  2446. if (err < 0)
  2447. return err;
  2448. pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
  2449. if (pcm_file == NULL) {
  2450. snd_pcm_release_substream(substream);
  2451. return -ENOMEM;
  2452. }
  2453. pcm_file->substream = substream;
  2454. if (substream->ref_count == 1)
  2455. substream->pcm_release = pcm_release_private;
  2456. file->private_data = pcm_file;
  2457. return 0;
  2458. }
  2459. static int snd_pcm_playback_open(struct inode *inode, struct file *file)
  2460. {
  2461. struct snd_pcm *pcm;
  2462. int err = nonseekable_open(inode, file);
  2463. if (err < 0)
  2464. return err;
  2465. pcm = snd_lookup_minor_data(iminor(inode),
  2466. SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
  2467. err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
  2468. if (pcm)
  2469. snd_card_unref(pcm->card);
  2470. return err;
  2471. }
  2472. static int snd_pcm_capture_open(struct inode *inode, struct file *file)
  2473. {
  2474. struct snd_pcm *pcm;
  2475. int err = nonseekable_open(inode, file);
  2476. if (err < 0)
  2477. return err;
  2478. pcm = snd_lookup_minor_data(iminor(inode),
  2479. SNDRV_DEVICE_TYPE_PCM_CAPTURE);
  2480. err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
  2481. if (pcm)
  2482. snd_card_unref(pcm->card);
  2483. return err;
  2484. }
  2485. static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
  2486. {
  2487. int err;
  2488. wait_queue_entry_t wait;
  2489. if (pcm == NULL) {
  2490. err = -ENODEV;
  2491. goto __error1;
  2492. }
  2493. err = snd_card_file_add(pcm->card, file);
  2494. if (err < 0)
  2495. goto __error1;
  2496. if (!try_module_get(pcm->card->module)) {
  2497. err = -EFAULT;
  2498. goto __error2;
  2499. }
  2500. init_waitqueue_entry(&wait, current);
  2501. add_wait_queue(&pcm->open_wait, &wait);
  2502. mutex_lock(&pcm->open_mutex);
  2503. while (1) {
  2504. err = snd_pcm_open_file(file, pcm, stream);
  2505. if (err >= 0)
  2506. break;
  2507. if (err == -EAGAIN) {
  2508. if (file->f_flags & O_NONBLOCK) {
  2509. err = -EBUSY;
  2510. break;
  2511. }
  2512. } else
  2513. break;
  2514. set_current_state(TASK_INTERRUPTIBLE);
  2515. mutex_unlock(&pcm->open_mutex);
  2516. schedule();
  2517. mutex_lock(&pcm->open_mutex);
  2518. if (pcm->card->shutdown) {
  2519. err = -ENODEV;
  2520. break;
  2521. }
  2522. if (signal_pending(current)) {
  2523. err = -ERESTARTSYS;
  2524. break;
  2525. }
  2526. }
  2527. remove_wait_queue(&pcm->open_wait, &wait);
  2528. mutex_unlock(&pcm->open_mutex);
  2529. if (err < 0)
  2530. goto __error;
  2531. return err;
  2532. __error:
  2533. module_put(pcm->card->module);
  2534. __error2:
  2535. snd_card_file_remove(pcm->card, file);
  2536. __error1:
  2537. return err;
  2538. }
  2539. static int snd_pcm_release(struct inode *inode, struct file *file)
  2540. {
  2541. struct snd_pcm *pcm;
  2542. struct snd_pcm_substream *substream;
  2543. struct snd_pcm_file *pcm_file;
  2544. pcm_file = file->private_data;
  2545. substream = pcm_file->substream;
  2546. if (snd_BUG_ON(!substream))
  2547. return -ENXIO;
  2548. pcm = substream->pcm;
  2549. mutex_lock(&pcm->open_mutex);
  2550. snd_pcm_release_substream(substream);
  2551. kfree(pcm_file);
  2552. mutex_unlock(&pcm->open_mutex);
  2553. wake_up(&pcm->open_wait);
  2554. module_put(pcm->card->module);
  2555. snd_card_file_remove(pcm->card, file);
  2556. return 0;
  2557. }
  2558. /* check and update PCM state; return 0 or a negative error
2559. * call this inside the PCM stream lock
  2560. */
  2561. static int do_pcm_hwsync(struct snd_pcm_substream *substream)
  2562. {
  2563. switch (substream->runtime->status->state) {
  2564. case SNDRV_PCM_STATE_DRAINING:
  2565. if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
  2566. return -EBADFD;
  2567. fallthrough;
  2568. case SNDRV_PCM_STATE_RUNNING:
  2569. return snd_pcm_update_hw_ptr(substream);
  2570. case SNDRV_PCM_STATE_PREPARED:
  2571. case SNDRV_PCM_STATE_PAUSED:
  2572. return 0;
  2573. case SNDRV_PCM_STATE_SUSPENDED:
  2574. return -ESTRPIPE;
  2575. case SNDRV_PCM_STATE_XRUN:
  2576. return -EPIPE;
  2577. default:
  2578. return -EBADFD;
  2579. }
  2580. }
  2581. /* increase the appl_ptr; returns the processed frames or a negative error */
  2582. static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
  2583. snd_pcm_uframes_t frames,
  2584. snd_pcm_sframes_t avail)
  2585. {
  2586. struct snd_pcm_runtime *runtime = substream->runtime;
  2587. snd_pcm_sframes_t appl_ptr;
  2588. int ret;
  2589. if (avail <= 0)
  2590. return 0;
  2591. if (frames > (snd_pcm_uframes_t)avail)
  2592. frames = avail;
  2593. appl_ptr = runtime->control->appl_ptr + frames;
  2594. if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
  2595. appl_ptr -= runtime->boundary;
  2596. ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
  2597. return ret < 0 ? ret : frames;
  2598. }
  2599. /* decrease the appl_ptr; returns the processed frames or zero for error */
  2600. static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
  2601. snd_pcm_uframes_t frames,
  2602. snd_pcm_sframes_t avail)
  2603. {
  2604. struct snd_pcm_runtime *runtime = substream->runtime;
  2605. snd_pcm_sframes_t appl_ptr;
  2606. int ret;
  2607. if (avail <= 0)
  2608. return 0;
  2609. if (frames > (snd_pcm_uframes_t)avail)
  2610. frames = avail;
  2611. appl_ptr = runtime->control->appl_ptr - frames;
  2612. if (appl_ptr < 0)
  2613. appl_ptr += runtime->boundary;
  2614. ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2615. /* NOTE: we return zero for errors because PulseAudio gives up
2616. * upon receiving an error from the rewind ioctl and stops
2617. * processing altogether. Returning zero means that no rewind was
2618. * done, so it's not absolutely wrong to answer like that.
2619. */
  2620. return ret < 0 ? 0 : frames;
  2621. }
  2622. static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
  2623. snd_pcm_uframes_t frames)
  2624. {
  2625. snd_pcm_sframes_t ret;
  2626. if (frames == 0)
  2627. return 0;
  2628. snd_pcm_stream_lock_irq(substream);
  2629. ret = do_pcm_hwsync(substream);
  2630. if (!ret)
  2631. ret = rewind_appl_ptr(substream, frames,
  2632. snd_pcm_hw_avail(substream));
  2633. snd_pcm_stream_unlock_irq(substream);
  2634. return ret;
  2635. }
  2636. static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
  2637. snd_pcm_uframes_t frames)
  2638. {
  2639. snd_pcm_sframes_t ret;
  2640. if (frames == 0)
  2641. return 0;
  2642. snd_pcm_stream_lock_irq(substream);
  2643. ret = do_pcm_hwsync(substream);
  2644. if (!ret)
  2645. ret = forward_appl_ptr(substream, frames,
  2646. snd_pcm_avail(substream));
  2647. snd_pcm_stream_unlock_irq(substream);
  2648. return ret;
  2649. }
static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
{
	int err;

	snd_pcm_stream_lock_irq(substream);
	err = do_pcm_hwsync(substream);
	snd_pcm_stream_unlock_irq(substream);
	return err;
}

static int snd_pcm_delay(struct snd_pcm_substream *substream,
			 snd_pcm_sframes_t *delay)
{
	int err;
	snd_pcm_sframes_t n = 0;

	snd_pcm_stream_lock_irq(substream);
	err = do_pcm_hwsync(substream);
	if (!err)
		n = snd_pcm_calc_delay(substream);
	snd_pcm_stream_unlock_irq(substream);
	if (!err)
		*delay = n;
	return err;
}
static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
			    struct snd_pcm_sync_ptr __user *_sync_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_sync_ptr sync_ptr;
	volatile struct snd_pcm_mmap_status *status;
	volatile struct snd_pcm_mmap_control *control;
	int err;

	memset(&sync_ptr, 0, sizeof(sync_ptr));
	if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
		return -EFAULT;
	if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
		return -EFAULT;
	status = runtime->status;
	control = runtime->control;
	if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
		err = snd_pcm_hwsync(substream);
		if (err < 0)
			return err;
	}
	snd_pcm_stream_lock_irq(substream);
	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
		err = pcm_lib_apply_appl_ptr(substream,
					     sync_ptr.c.control.appl_ptr);
		if (err < 0) {
			snd_pcm_stream_unlock_irq(substream);
			return err;
		}
	} else {
		sync_ptr.c.control.appl_ptr = control->appl_ptr;
	}
	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
		control->avail_min = sync_ptr.c.control.avail_min;
	else
		sync_ptr.c.control.avail_min = control->avail_min;
	sync_ptr.s.status.state = status->state;
	sync_ptr.s.status.hw_ptr = status->hw_ptr;
	sync_ptr.s.status.tstamp = status->tstamp;
	sync_ptr.s.status.suspended_state = status->suspended_state;
	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
	snd_pcm_stream_unlock_irq(substream);
	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
		return -EFAULT;
	return 0;
}
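/* A rough sketch of the user-space side of this handshake (illustrative
 * only, not part of this file): the caller fills in the control record and
 * the flags, and the kernel hands back the status record.
 *
 *	struct snd_pcm_sync_ptr sync;
 *	memset(&sync, 0, sizeof(sync));
 *	sync.flags = SNDRV_PCM_SYNC_PTR_HWSYNC;		// refresh hw_ptr first
 *	sync.c.control.appl_ptr = new_appl_ptr;		// applied unless _APPL is set
 *	ioctl(pcm_fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sync);
 *	// sync.s.status.hw_ptr and .state now hold the kernel's view
 */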
struct snd_pcm_mmap_status32 {
	snd_pcm_state_t state;
	s32 pad1;
	u32 hw_ptr;
	s32 tstamp_sec;
	s32 tstamp_nsec;
	snd_pcm_state_t suspended_state;
	s32 audio_tstamp_sec;
	s32 audio_tstamp_nsec;
} __attribute__((packed));

struct snd_pcm_mmap_control32 {
	u32 appl_ptr;
	u32 avail_min;
};

struct snd_pcm_sync_ptr32 {
	u32 flags;
	union {
		struct snd_pcm_mmap_status32 status;
		unsigned char reserved[64];
	} s;
	union {
		struct snd_pcm_mmap_control32 control;
		unsigned char reserved[64];
	} c;
} __attribute__((packed));

/* recalculate the boundary within 32bit */
static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t boundary;

	if (! runtime->buffer_size)
		return 0;
	boundary = runtime->buffer_size;
	while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
		boundary *= 2;
	return boundary;
}
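/* Worked example (illustrative figures): with buffer_size = 1024 frames the
 * loop above keeps doubling the boundary while the doubled value still fits
 * below 0x7fffffff - buffer_size, ending at 0x40000000 frames, which is then
 * used for the 32-bit wrapping in snd_pcm_ioctl_sync_ptr_compat() below.
 */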
static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
					 struct snd_pcm_sync_ptr32 __user *src)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	volatile struct snd_pcm_mmap_status *status;
	volatile struct snd_pcm_mmap_control *control;
	u32 sflags;
	struct snd_pcm_mmap_control scontrol;
	struct snd_pcm_mmap_status sstatus;
	snd_pcm_uframes_t boundary;
	int err;

	if (snd_BUG_ON(!runtime))
		return -EINVAL;

	if (get_user(sflags, &src->flags) ||
	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    get_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;
	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
		err = snd_pcm_hwsync(substream);
		if (err < 0)
			return err;
	}
	status = runtime->status;
	control = runtime->control;
	boundary = recalculate_boundary(runtime);
	if (! boundary)
		boundary = 0x7fffffff;
	snd_pcm_stream_lock_irq(substream);
	/* FIXME: we should consider the boundary for the sync from app */
	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
		err = pcm_lib_apply_appl_ptr(substream,
					     scontrol.appl_ptr);
		if (err < 0) {
			snd_pcm_stream_unlock_irq(substream);
			return err;
		}
	} else
		scontrol.appl_ptr = control->appl_ptr % boundary;
	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
		control->avail_min = scontrol.avail_min;
	else
		scontrol.avail_min = control->avail_min;
	sstatus.state = status->state;
	sstatus.hw_ptr = status->hw_ptr % boundary;
	sstatus.tstamp = status->tstamp;
	sstatus.suspended_state = status->suspended_state;
	sstatus.audio_tstamp = status->audio_tstamp;
	snd_pcm_stream_unlock_irq(substream);
	if (put_user(sstatus.state, &src->s.status.state) ||
	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||
	    put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp_nsec) ||
	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
	    put_user(sstatus.audio_tstamp.tv_sec, &src->s.status.audio_tstamp_sec) ||
	    put_user(sstatus.audio_tstamp.tv_nsec, &src->s.status.audio_tstamp_nsec) ||
	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    put_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;

	return 0;
}
#define __SNDRV_PCM_IOCTL_SYNC_PTR32 _IOWR('A', 0x23, struct snd_pcm_sync_ptr32)

static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int arg;

	if (get_user(arg, _arg))
		return -EFAULT;
	if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
		return -EINVAL;
	runtime->tstamp_type = arg;
	return 0;
}
static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
				      struct snd_xferi __user *_xferi)
{
	struct snd_xferi xferi;
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t result;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (put_user(0, &_xferi->result))
		return -EFAULT;
	if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
		return -EFAULT;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
	else
		result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
	if (put_user(result, &_xferi->result))
		return -EFAULT;
	return result < 0 ? result : 0;
}

static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
				      struct snd_xfern __user *_xfern)
{
	struct snd_xfern xfern;
	struct snd_pcm_runtime *runtime = substream->runtime;
	void *bufs;
	snd_pcm_sframes_t result;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (runtime->channels > 128)
		return -EINVAL;
	if (put_user(0, &_xfern->result))
		return -EFAULT;
	if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
		return -EFAULT;

	bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
	if (IS_ERR(bufs))
		return PTR_ERR(bufs);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
	else
		result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
	kfree(bufs);
	if (put_user(result, &_xfern->result))
		return -EFAULT;
	return result < 0 ? result : 0;
}
static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
				snd_pcm_uframes_t __user *_frames)
{
	snd_pcm_uframes_t frames;
	snd_pcm_sframes_t result;

	if (get_user(frames, _frames))
		return -EFAULT;
	if (put_user(0, _frames))
		return -EFAULT;
	result = snd_pcm_rewind(substream, frames);
	if (put_user(result, _frames))
		return -EFAULT;
	return result < 0 ? result : 0;
}

static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
				 snd_pcm_uframes_t __user *_frames)
{
	snd_pcm_uframes_t frames;
	snd_pcm_sframes_t result;

	if (get_user(frames, _frames))
		return -EFAULT;
	if (put_user(0, _frames))
		return -EFAULT;
	result = snd_pcm_forward(substream, frames);
	if (put_user(result, _frames))
		return -EFAULT;
	return result < 0 ? result : 0;
}
static int snd_pcm_common_ioctl(struct file *file,
				struct snd_pcm_substream *substream,
				unsigned int cmd, void __user *arg)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	int res;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
	if (res < 0)
		return res;

	switch (cmd) {
	case SNDRV_PCM_IOCTL_PVERSION:
		return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
	case SNDRV_PCM_IOCTL_INFO:
		return snd_pcm_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_TSTAMP:	/* just for compatibility */
		return 0;
	case SNDRV_PCM_IOCTL_TTSTAMP:
		return snd_pcm_tstamp(substream, arg);
	case SNDRV_PCM_IOCTL_USER_PVERSION:
		if (get_user(pcm_file->user_pversion,
			     (unsigned int __user *)arg))
			return -EFAULT;
		return 0;
	case SNDRV_PCM_IOCTL_HW_REFINE:
		return snd_pcm_hw_refine_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS:
		return snd_pcm_hw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_FREE:
		return snd_pcm_hw_free(substream);
	case SNDRV_PCM_IOCTL_SW_PARAMS:
		return snd_pcm_sw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_STATUS32:
		return snd_pcm_status_user32(substream, arg, false);
	case SNDRV_PCM_IOCTL_STATUS_EXT32:
		return snd_pcm_status_user32(substream, arg, true);
	case SNDRV_PCM_IOCTL_STATUS64:
		return snd_pcm_status_user64(substream, arg, false);
	case SNDRV_PCM_IOCTL_STATUS_EXT64:
		return snd_pcm_status_user64(substream, arg, true);
	case SNDRV_PCM_IOCTL_CHANNEL_INFO:
		return snd_pcm_channel_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_PREPARE:
		return snd_pcm_prepare(substream, file);
	case SNDRV_PCM_IOCTL_RESET:
		return snd_pcm_reset(substream);
	case SNDRV_PCM_IOCTL_START:
		return snd_pcm_start_lock_irq(substream);
	case SNDRV_PCM_IOCTL_LINK:
		return snd_pcm_link(substream, (int)(unsigned long) arg);
	case SNDRV_PCM_IOCTL_UNLINK:
		return snd_pcm_unlink(substream);
	case SNDRV_PCM_IOCTL_RESUME:
		return snd_pcm_resume(substream);
	case SNDRV_PCM_IOCTL_XRUN:
		return snd_pcm_xrun(substream);
	case SNDRV_PCM_IOCTL_HWSYNC:
		return snd_pcm_hwsync(substream);
	case SNDRV_PCM_IOCTL_DELAY:
	{
		snd_pcm_sframes_t delay;
		snd_pcm_sframes_t __user *res = arg;
		int err;

		err = snd_pcm_delay(substream, &delay);
		if (err)
			return err;
		if (put_user(delay, res))
			return -EFAULT;
		return 0;
	}
	case __SNDRV_PCM_IOCTL_SYNC_PTR32:
		return snd_pcm_ioctl_sync_ptr_compat(substream, arg);
	case __SNDRV_PCM_IOCTL_SYNC_PTR64:
		return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
	case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
		return snd_pcm_hw_refine_old_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
		return snd_pcm_hw_params_old_user(substream, arg);
#endif
	case SNDRV_PCM_IOCTL_DRAIN:
		return snd_pcm_drain(substream, file);
	case SNDRV_PCM_IOCTL_DROP:
		return snd_pcm_drop(substream);
	case SNDRV_PCM_IOCTL_PAUSE:
		return snd_pcm_pause_lock_irq(substream, (unsigned long)arg);
	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
	case SNDRV_PCM_IOCTL_READI_FRAMES:
		return snd_pcm_xferi_frames_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
	case SNDRV_PCM_IOCTL_READN_FRAMES:
		return snd_pcm_xfern_frames_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_REWIND:
		return snd_pcm_rewind_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_FORWARD:
		return snd_pcm_forward_ioctl(substream, arg);
	}
	pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
	return -ENOTTY;
}
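/* Illustrative user-space call into the dispatcher above (not part of this
 * file): querying the current delay through the ioctl interface.
 *
 *	snd_pcm_sframes_t delay;
 *	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_DELAY, &delay) == 0)
 *		printf("delay: %ld frames\n", (long)delay);
 */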
static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct snd_pcm_file *pcm_file;

	pcm_file = file->private_data;

	if (((cmd >> 8) & 0xff) != 'A')
		return -ENOTTY;

	return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
				    (void __user *)arg);
}
/**
 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
 * @substream: PCM substream
 * @cmd: IOCTL cmd
 * @arg: IOCTL argument
 *
 * The function is provided primarily for OSS layer and USB gadget drivers,
 * and it allows only the limited set of ioctls (hw_params, sw_params,
 * prepare, start, drain, drop, forward, delay).
 */
int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
			 unsigned int cmd, void *arg)
{
	snd_pcm_uframes_t *frames = arg;
	snd_pcm_sframes_t result;

	switch (cmd) {
	case SNDRV_PCM_IOCTL_FORWARD:
	{
		/* provided only for OSS; capture-only and no value returned */
		if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
			return -EINVAL;
		result = snd_pcm_forward(substream, *frames);
		return result < 0 ? result : 0;
	}
	case SNDRV_PCM_IOCTL_HW_PARAMS:
		return snd_pcm_hw_params(substream, arg);
	case SNDRV_PCM_IOCTL_SW_PARAMS:
		return snd_pcm_sw_params(substream, arg);
	case SNDRV_PCM_IOCTL_PREPARE:
		return snd_pcm_prepare(substream, NULL);
	case SNDRV_PCM_IOCTL_START:
		return snd_pcm_start_lock_irq(substream);
	case SNDRV_PCM_IOCTL_DRAIN:
		return snd_pcm_drain(substream, NULL);
	case SNDRV_PCM_IOCTL_DROP:
		return snd_pcm_drop(substream);
	case SNDRV_PCM_IOCTL_DELAY:
		return snd_pcm_delay(substream, frames);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
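/* Minimal in-kernel usage sketch (hypothetical caller, e.g. an OSS emulation
 * or USB gadget function driver); only the ioctls listed above are accepted:
 *
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
 *	if (!err)
 *		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
 */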
static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!frame_aligned(runtime, count))
		return -EINVAL;
	count = bytes_to_frames(runtime, count);
	result = snd_pcm_lib_read(substream, buf, count);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	return result;
}

static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *offset)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!frame_aligned(runtime, count))
		return -EINVAL;
	count = bytes_to_frames(runtime, count);
	result = snd_pcm_lib_write(substream, buf, count);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	return result;
}
static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;
	unsigned long i;
	void __user **bufs;
	snd_pcm_uframes_t frames;

	pcm_file = iocb->ki_filp->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!iter_is_iovec(to))
		return -EINVAL;
	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
		return -EINVAL;
	if (!frame_aligned(runtime, to->iov->iov_len))
		return -EINVAL;
	frames = bytes_to_samples(runtime, to->iov->iov_len);
	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;
	for (i = 0; i < to->nr_segs; ++i)
		bufs[i] = to->iov[i].iov_base;
	result = snd_pcm_lib_readv(substream, bufs, frames);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	kfree(bufs);
	return result;
}

static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;
	unsigned long i;
	void __user **bufs;
	snd_pcm_uframes_t frames;

	pcm_file = iocb->ki_filp->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!iter_is_iovec(from))
		return -EINVAL;
	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
	    !frame_aligned(runtime, from->iov->iov_len))
		return -EINVAL;
	frames = bytes_to_samples(runtime, from->iov->iov_len);
	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;
	for (i = 0; i < from->nr_segs; ++i)
		bufs[i] = from->iov[i].iov_base;
	result = snd_pcm_lib_writev(substream, bufs, frames);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	kfree(bufs);
	return result;
}
static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	__poll_t mask, ok;
	snd_pcm_uframes_t avail;

	pcm_file = file->private_data;

	substream = pcm_file->substream;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ok = EPOLLOUT | EPOLLWRNORM;
	else
		ok = EPOLLIN | EPOLLRDNORM;
	if (PCM_RUNTIME_CHECK(substream))
		return ok | EPOLLERR;

	runtime = substream->runtime;
	poll_wait(file, &runtime->sleep, wait);

	mask = 0;
	snd_pcm_stream_lock_irq(substream);
	avail = snd_pcm_avail(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= runtime->control->avail_min)
			mask = ok;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
			mask = ok;
			if (!avail)
				mask |= EPOLLERR;
		}
		break;
	default:
		mask = ok | EPOLLERR;
		break;
	}
	snd_pcm_stream_unlock_irq(substream);
	return mask;
}
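/* Illustrative user-space counterpart of the poll handler above (not part of
 * this file): wait until at least avail_min frames can be transferred.
 *
 *	struct pollfd pfd = { .fd = pcm_fd, .events = POLLOUT | POLLIN };
 *	poll(&pfd, 1, timeout_ms);
 *	// POLLERR typically indicates an XRUN or another non-running state
 */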
/*
 * mmap support
 */

/*
 * Only on coherent architectures can we mmap the status and the control
 * records for efficient data transfer.  On others, we have to use the
 * HWSYNC ioctl...
 */
#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
/*
 * mmap status record
 */
static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	vmf->page = virt_to_page(runtime->status);
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_status =
{
	.fault = snd_pcm_mmap_status_fault,
};

static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	long size;

	if (!(area->vm_flags & VM_READ))
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
		return -EINVAL;
	area->vm_ops = &snd_pcm_vm_ops_status;
	area->vm_private_data = substream;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
/*
 * mmap control record
 */
static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	vmf->page = virt_to_page(runtime->control);
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_control =
{
	.fault = snd_pcm_mmap_control_fault,
};

static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	long size;

	if (!(area->vm_flags & VM_READ))
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
		return -EINVAL;
	area->vm_ops = &snd_pcm_vm_ops_control;
	area->vm_private_data = substream;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
{
	/* See pcm_control_mmap_allowed() below.
	 * Since older alsa-lib requires both status and control mmaps to be
	 * coupled, we have to disable the status mmap for old alsa-lib, too.
	 */
	if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
	    (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
		return false;
	return true;
}

static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
{
	if (pcm_file->no_compat_mmap)
		return false;
	/* Disallow the control mmap when the SYNC_APPLPTR flag is set;
	 * it forces user-space to fall back to snd_pcm_sync_ptr() and
	 * thus effectively assures the manual update of appl_ptr.
	 */
	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
		return false;
	return true;
}
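/* Summary of the two gates above: control mmap is refused whenever the driver
 * sets SNDRV_PCM_INFO_SYNC_APPLPTR, and status mmap is additionally refused
 * for clients whose negotiated protocol is older than 2.0.14, since such
 * alsa-lib versions treat the status and control mappings as a pair.
 */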
#else /* ! coherent mmap */
/*
 * don't support mmap for status and control records.
 */
#define pcm_status_mmap_allowed(pcm_file)	false
#define pcm_control_mmap_allowed(pcm_file)	false

static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	return -ENXIO;
}

static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	return -ENXIO;
}
#endif /* coherent mmap */
static inline struct page *
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
	void *vaddr = substream->runtime->dma_area + ofs;

	switch (substream->dma_buffer.dev.type) {
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
	case SNDRV_DMA_TYPE_DEV_UC_SG:
		return snd_pcm_sgbuf_ops_page(substream, ofs);
#endif /* CONFIG_SND_DMA_SGBUF */
	case SNDRV_DMA_TYPE_VMALLOC:
		return vmalloc_to_page(vaddr);
	default:
		return virt_to_page(vaddr);
	}
}
/*
 * fault callback for mmapping a RAM page
 */
static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;
	unsigned long offset;
	struct page *page;
	size_t dma_bytes;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	offset = vmf->pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if (offset > dma_bytes - PAGE_SIZE)
		return VM_FAULT_SIGBUS;
	if (substream->ops->page)
		page = substream->ops->page(substream, offset);
	else
		page = snd_pcm_default_page_ops(substream, offset);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_data = {
	.open = snd_pcm_mmap_data_open,
	.close = snd_pcm_mmap_data_close,
};

static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
	.open = snd_pcm_mmap_data_open,
	.close = snd_pcm_mmap_data_close,
	.fault = snd_pcm_mmap_data_fault,
};
/*
 * mmap the DMA buffer on RAM
 */

/**
 * snd_pcm_lib_default_mmap - Default PCM data mmap function
 * @substream: PCM substream
 * @area: VMA
 *
 * This is the default mmap handler for PCM data.  When mmap pcm_ops is NULL,
 * this function is invoked implicitly.
 */
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *area)
{
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
#ifdef CONFIG_GENERIC_ALLOCATOR
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
		return remap_pfn_range(area, area->vm_start,
				substream->dma_buffer.addr >> PAGE_SHIFT,
				area->vm_end - area->vm_start, area->vm_page_prot);
	}
#endif /* CONFIG_GENERIC_ALLOCATOR */
	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
	    (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
	     substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
					 area,
					 substream->runtime->dma_area,
					 substream->runtime->dma_addr,
					 substream->runtime->dma_bytes);
	/* mmap with fault handler */
	area->vm_ops = &snd_pcm_vm_ops_data_fault;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
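/* Sketch of how a driver picks this up (illustrative; foo_pcm_ops is an
 * assumed driver-local table): either leave .mmap NULL so the PCM core falls
 * back to this helper, or reference it explicitly:
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		...
 *		.mmap = snd_pcm_lib_default_mmap,
 *	};
 */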
/*
 * mmap the DMA buffer on I/O memory area
 */
#if SNDRV_PCM_INFO_MMAP_IOMEM
/**
 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
 * @substream: PCM substream
 * @area: VMA
 *
 * When your hardware uses the iomapped pages as the hardware buffer and
 * wants to mmap it, pass this function as the mmap pcm_ops.  Note that this
 * is supposed to work only on limited architectures.
 */
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
			   struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
}
EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
#endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
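/* Sketch of a driver using the iomem variant (illustrative; foo_pcm_ops is an
 * assumed name): the hardware buffer lives in device I/O memory, so the
 * driver advertises SNDRV_PCM_INFO_MMAP_IOMEM in its hw info and wires up the
 * helper:
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		...
 *		.mmap = snd_pcm_lib_mmap_iomem,
 *	};
 */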
/*
 * mmap DMA buffer
 */
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
		      struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime;
	long size;
	unsigned long offset;
	size_t dma_bytes;
	int err;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (!(area->vm_flags & (VM_WRITE|VM_READ)))
			return -EINVAL;
	} else {
		if (!(area->vm_flags & VM_READ))
			return -EINVAL;
	}
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
		return -ENXIO;
	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	offset = area->vm_pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if ((size_t)size > dma_bytes)
		return -EINVAL;
	if (offset > dma_bytes - size)
		return -EINVAL;

	area->vm_ops = &snd_pcm_vm_ops_data;
	area->vm_private_data = substream;
	if (substream->ops->mmap)
		err = substream->ops->mmap(substream, area);
	else
		err = snd_pcm_lib_default_mmap(substream, area);
	if (!err)
		atomic_inc(&substream->mmap_count);
	return err;
}
EXPORT_SYMBOL(snd_pcm_mmap_data);
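/* Illustrative user-space counterpart (not part of this file): the DMA buffer
 * itself is mapped at offset 0, while the status/control records use the
 * special SNDRV_PCM_MMAP_OFFSET_* offsets dispatched in snd_pcm_mmap() below.
 *
 *	void *buf = mmap(NULL, dma_bytes, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, pcm_fd, 0);
 */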
static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	unsigned long offset;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	offset = area->vm_pgoff << PAGE_SHIFT;
	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS_OLD:
		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
			return -ENXIO;
		fallthrough;
	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
		if (!pcm_status_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_status(substream, file, area);
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD:
		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
			return -ENXIO;
		fallthrough;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
		if (!pcm_control_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_control(substream, file, area);
	default:
		return snd_pcm_mmap_data(substream, file, area);
	}
	return 0;
}
static int snd_pcm_fasync(int fd, struct file *file, int on)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	return fasync_helper(fd, file, on, &runtime->fasync);
}

/*
 * ioctl32 compat
 */
#ifdef CONFIG_COMPAT
#include "pcm_compat.c"
#else
#define snd_pcm_ioctl_compat	NULL
#endif
/*
 * Helpers below are to be removed; they are kept only for binary
 * compatibility with the old API.
 */
#ifdef CONFIG_SND_SUPPORT_OLD_API
#define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
#define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
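/* The remapping keeps the three lowest mask bits in place and shifts the rest
 * up by five: e.g. bit 3 of an old-API rmask/cmask becomes bit 8 of the new
 * one, and __NEW_TO_OLD_MASK() undoes exactly that shift.
 */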
static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
					       struct snd_pcm_hw_params_old *oparams)
{
	unsigned int i;

	memset(params, 0, sizeof(*params));
	params->flags = oparams->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		params->masks[i].bits[0] = oparams->masks[i];
	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
	params->info = oparams->info;
	params->msbits = oparams->msbits;
	params->rate_num = oparams->rate_num;
	params->rate_den = oparams->rate_den;
	params->fifo_size = oparams->fifo_size;
}

static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
					     struct snd_pcm_hw_params *params)
{
	unsigned int i;

	memset(oparams, 0, sizeof(*oparams));
	oparams->flags = params->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		oparams->masks[i] = params->masks[i].bits[0];
	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
	oparams->info = params->info;
	oparams->msbits = params->msbits;
	oparams->rate_num = params->rate_num;
	oparams->rate_den = params->rate_den;
	oparams->fifo_size = params->fifo_size;
}
static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user *_oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto out_old;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}

static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user *_oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}

	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}
#endif /* CONFIG_SND_SUPPORT_OLD_API */
#ifndef CONFIG_MMU
static unsigned long snd_pcm_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	struct snd_pcm_substream *substream = pcm_file->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long offset = pgoff << PAGE_SHIFT;

	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
		return (unsigned long)runtime->status;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
		return (unsigned long)runtime->control;
	default:
		return (unsigned long)runtime->dma_area + offset;
	}
}
#else
# define snd_pcm_get_unmapped_area NULL
#endif

/*
 * Register section
 */
const struct file_operations snd_pcm_f_ops[2] = {
	{
		.owner = THIS_MODULE,
		.write = snd_pcm_write,
		.write_iter = snd_pcm_writev,
		.open = snd_pcm_playback_open,
		.release = snd_pcm_release,
		.llseek = no_llseek,
		.poll = snd_pcm_poll,
		.unlocked_ioctl = snd_pcm_ioctl,
		.compat_ioctl = snd_pcm_ioctl_compat,
		.mmap = snd_pcm_mmap,
		.fasync = snd_pcm_fasync,
		.get_unmapped_area = snd_pcm_get_unmapped_area,
	},
	{
		.owner = THIS_MODULE,
		.read = snd_pcm_read,
		.read_iter = snd_pcm_readv,
		.open = snd_pcm_capture_open,
		.release = snd_pcm_release,
		.llseek = no_llseek,
		.poll = snd_pcm_poll,
		.unlocked_ioctl = snd_pcm_ioctl,
		.compat_ioctl = snd_pcm_ioctl_compat,
		.mmap = snd_pcm_mmap,
		.fasync = snd_pcm_fasync,
		.get_unmapped_area = snd_pcm_get_unmapped_area,
	}
};