// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

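/* Bump the per-CPU software RX packet/byte counters for @dev. */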
static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
					   unsigned int present_fcs_len,
					   unsigned int rtap_space)
{
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen;
	__le16 fc;

	if (present_fcs_len)
		__pskb_trim(skb, skb->len - present_fcs_len);
	__pskb_pull(skb, rtap_space);

	hdr = (void *)skb->data;
	fc = hdr->frame_control;

	/*
	 * Remove the HT-Control field (if present) on management
	 * frames after we've sent the frame to monitoring. We
	 * (currently) don't need it, and don't properly parse
	 * frames with it present, due to the assumption of a
	 * fixed management header length.
	 */
	if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
		return skb;

	hdrlen = ieee80211_hdrlen(fc);
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);

	if (!pskb_may_pull(skb, hdrlen)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
		hdrlen - IEEE80211_HT_CTL_LEN);
	__pskb_pull(skb, IEEE80211_HT_CTL_LEN);

	return skb;
}

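/*
 * Returns true if the frame should not be delivered to any non-monitor
 * interface: failed FCS/PLCP CRC, monitor-only or zero-length-PSDU
 * frames, frames too short to be valid, and control frames other than
 * PS-Poll and BlockAck requests.
 */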
static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR |
			    RX_FLAG_NO_PSDU))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

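/*
 * Compute how much room the radiotap header prepended for monitor
 * interfaces will need, based on which optional fields the RX status
 * and the hardware capabilities call for.
 */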
static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);
	/* vendor presence bitmap */
	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
		len += 4;

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->encoding == RX_ENC_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->encoding == RX_ENC_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		len = ALIGN(len, 8);
		len += 12;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
	}

	if (status->flag & RX_FLAG_NO_PSDU)
		len += 1;

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		len = ALIGN(len, 2);
		len += 4;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap;
		int vendor_data_offset = 0;

		/*
		 * The position to look at depends on the existence (or non-
		 * existence) of other elements, so take that into account...
		 */
		if (status->flag & RX_FLAG_RADIOTAP_HE)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he);
		if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he_mu);
		if (status->flag & RX_FLAG_RADIOTAP_LSIG)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_lsig);

		rtap = (void *)&skb->data[vendor_data_offset];

		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

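/*
 * If a monitor interface is configured to follow a MU-MIMO group
 * (mu_follow_addr), queue a copy of VHT Group ID Management action
 * frames addressed to that address for processing in the interface's
 * work.
 */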
static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
					 struct sk_buff *skb,
					 int rtap_space)
{
	struct {
		struct ieee80211_hdr_3addr hdr;
		u8 category;
		u8 action_code;
	} __packed __aligned(2) action;

	if (!sdata)
		return;

	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);

	if (skb->len < rtap_space + sizeof(action) +
		       VHT_MUMIMO_GROUPS_DATA_LEN)
		return;

	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
		return;

	skb_copy_bits(skb, rtap_space, &action, sizeof(action));

	if (!ieee80211_is_action(action.hdr.frame_control))
		return;

	if (action.category != WLAN_CATEGORY_VHT)
		return;

	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
		return;

	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};
	struct ieee80211_radiotap_he he = {};
	struct ieee80211_radiotap_he_mu he_mu = {};
	struct ieee80211_radiotap_lsig lsig = {};

	if (status->flag & RX_FLAG_RADIOTAP_HE) {
		he = *(struct ieee80211_radiotap_he *)skb->data;
		skb_pull(skb, sizeof(he));
		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
		skb_pull(skb, sizeof(he_mu));
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
		skb_pull(skb, sizeof(lsig));
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->encoding != RX_ENC_LEGACY) {
		/*
		 * Without rate information don't add it. If we have,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->bw == RATE_INFO_BW_10)
			shift = 1;
		else if (status->bw == RATE_INFO_BW_5)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	/* TODO: frequency offset in KHz */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->bw == RATE_INFO_BW_10)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->bw == RATE_INFO_BW_5)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == NL80211_BAND_5GHZ ||
	    status->band == NL80211_BAND_6GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->encoding != RX_ENC_LEGACY)
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->encoding == RX_ENC_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->bw == RATE_INFO_BW_40)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

	if (status->encoding == RX_ENC_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->enc_flags & RX_ENC_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		switch (status->bw) {
		case RATE_INFO_BW_80:
			*pos++ = 4;
			break;
		case RATE_INFO_BW_160:
			*pos++ = 11;
			break;
		case RATE_INFO_BW_40:
			*pos++ = 1;
			break;
		default:
			*pos++ = 0;
		}
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->nss;
		pos += 4;
		/* coding field */
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		u16 accuracy = 0;
		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;

		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);

		/* ensure 8 byte alignment */
		while ((pos - (u8 *)rthdr) & 7)
			pos++;

		put_unaligned_le64(status->device_timestamp, pos);
		pos += sizeof(u64);

		if (local->hw.radiotap_timestamp.accuracy >= 0) {
			accuracy = local->hw.radiotap_timestamp.accuracy;
			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
		}
		put_unaligned_le16(accuracy, pos);
		pos += sizeof(u16);

		*pos++ = local->hw.radiotap_timestamp.units_pos;
		*pos++ = flags;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
#define HE_PREP(f, val)	le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)

		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
			he.data6 |= HE_PREP(DATA6_NSTS,
					    FIELD_GET(RX_ENC_FLAG_STBC_MASK,
						      status->enc_flags));
			he.data3 |= HE_PREP(DATA3_STBC, 1);
		} else {
			he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
		}

#define CHECK_GI(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
		     (int)NL80211_RATE_INFO_HE_GI_##s)

		CHECK_GI(0_8);
		CHECK_GI(1_6);
		CHECK_GI(3_2);

		he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
		he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
		he.data3 |= HE_PREP(DATA3_CODING,
				    !!(status->enc_flags & RX_ENC_FLAG_LDPC));

		he.data5 |= HE_PREP(DATA5_GI, status->he_gi);

		switch (status->bw) {
		case RATE_INFO_BW_20:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
			break;
		case RATE_INFO_BW_40:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
			break;
		case RATE_INFO_BW_80:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
			break;
		case RATE_INFO_BW_160:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
			break;
		case RATE_INFO_BW_HE_RU:
#define CHECK_RU_ALLOC(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
		     NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)

			CHECK_RU_ALLOC(26);
			CHECK_RU_ALLOC(52);
			CHECK_RU_ALLOC(106);
			CHECK_RU_ALLOC(242);
			CHECK_RU_ALLOC(484);
			CHECK_RU_ALLOC(996);
			CHECK_RU_ALLOC(2x996);

			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    status->he_ru + 4);
			break;
		default:
			WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
		}

		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
		memcpy(pos, &he, sizeof(he));
		pos += sizeof(he);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
		memcpy(pos, &he_mu, sizeof(he_mu));
		pos += sizeof(he_mu);
	}

	if (status->flag & RX_FLAG_NO_PSDU) {
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
		*pos++ = status->zero_length_psdu_type;
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
		memcpy(pos, &lsig, sizeof(lsig));
		pos += sizeof(lsig);
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;

		put_unaligned_le16(rtap.len, pos);
		pos += 2;

		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;

		/* data (and possible padding) already follows */
	}
}

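/*
 * Build the skb that will be handed to a monitor interface: reuse
 * @origskb when the caller allows it (taking ownership and clearing the
 * caller's pointer), otherwise take a copy, then prepend the radiotap
 * header.
 */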
static struct sk_buff *
ieee80211_make_monitor_skb(struct ieee80211_local *local,
			   struct sk_buff **origskb,
			   struct ieee80211_rate *rate,
			   int rtap_space, bool use_origskb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb;

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
	needed_headroom = rt_hdrlen - rtap_space;

	if (use_origskb) {
		/* only need to expand headroom if necessary */
		skb = *origskb;
		*origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);

		if (!skb)
			return NULL;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	return skb;
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *monskb = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_space = 0;
	struct ieee80211_sub_if_data *monitor_sdata =
		rcu_dereference(local->monitor_sdata);
	bool only_monitor = false;
	unsigned int min_head_len;

	if (status->flag & RX_FLAG_RADIOTAP_HE)
		rtap_space += sizeof(struct ieee80211_radiotap_he);

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
		rtap_space += sizeof(struct ieee80211_radiotap_he_mu);

	if (status->flag & RX_FLAG_RADIOTAP_LSIG)
		rtap_space += sizeof(struct ieee80211_radiotap_lsig);

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap =
			(void *)(origskb->data + rtap_space);

		rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
	}

	min_head_len = rtap_space;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */
	if (!(status->flag & RX_FLAG_NO_PSDU)) {
		if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
			if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
				/* driver bug */
				WARN_ON(1);
				dev_kfree_skb(origskb);
				return NULL;
			}
			present_fcs_len = FCS_LEN;
		}

		/* also consider the hdr->frame_control */
		min_head_len += 2;
	}

	/* ensure that the expected data elements are in skb head */
	if (!pskb_may_pull(origskb, min_head_len)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);

	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
		if (only_monitor) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return ieee80211_clean_skb(origskb, present_fcs_len,
					   rtap_space);
	}

	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);

	list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
		bool last_monitor = list_is_last(&sdata->u.mntr.list,
						 &local->mon_list);

		if (!monskb)
			monskb = ieee80211_make_monitor_skb(local, &origskb,
							    rate, rtap_space,
							    only_monitor &&
							    last_monitor);

		if (monskb) {
			struct sk_buff *skb;

			if (last_monitor) {
				skb = monskb;
				monskb = NULL;
			} else {
				skb = skb_clone(monskb, GFP_ATOMIC);
			}

			if (skb) {
				skb->dev = sdata->dev;
				ieee80211_rx_stats(skb->dev, skb->len);
				netif_receive_skb(skb);
			}
		}

		if (last_monitor)
			break;
	}

	/* this happens if last_monitor was erroneously false */
	dev_kfree_skb(monskb);

	/* ditto */
	if (!origskb)
		return NULL;

	return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
}

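/*
 * Parse the QoS control field (if any) to determine the TID and the
 * sequence-number and security indices for this frame, and set
 * skb->priority accordingly.
 */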
static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 *	Sequence numbers for management frames, QoS data
		 *	frames with a broadcast/multicast address in the
		 *	Address 1 field, and all non-QoS data frames sent
		 *	by QoS STAs are assigned using an additional single
		 *	modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like that added by Atheros hardware between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header so that it sits directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}

/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}

static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}

/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

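/*
 * Extract the key index from the IV of a protected frame, using either
 * the driver-provided cipher scheme parameters or the standard
 * WEP/TKIP/CCMP/GCMP layout.
 */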
static int ieee80211_get_keyid(struct sk_buff *skb,
			       const struct ieee80211_cipher_scheme *cs)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	int minlen;
	u8 key_idx_off;
	u8 key_idx_shift;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (cs) {
		minlen = hdrlen + cs->hdr_len;
		key_idx_off = hdrlen + cs->key_idx_off;
		key_idx_shift = cs->key_idx_shift;
	} else {
		/* WEP, TKIP, CCMP and GCMP */
		minlen = hdrlen + IEEE80211_WEP_IV_LEN;
		key_idx_off = hdrlen + 3;
		key_idx_shift = 6;
	}

	if (unlikely(skb->len < minlen))
		return -EINVAL;

	skb_copy_bits(skb, key_idx_off, &keyid, 1);

	if (cs)
		keyid &= cs->key_idx_mask;
	keyid >>= key_idx_shift;

	/* cs could use more than the usual two bits for the keyid */
	if (unlikely(keyid >= NUM_DEFAULT_KEYS))
		return -EINVAL;

	return keyid;
}

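/*
 * Sanity checks for frames received on mesh interfaces; frames that
 * fail are dropped (to monitor interfaces).
 */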
static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */
	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

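/*
 * Returns true if reorder slot @index can be released: it was filtered
 * out by the hardware, or it holds a complete (A-)MSDU.
 */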
static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

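/*
 * Flush reorder slot @index: move its frames to @frames for further
 * processing (or drop an incomplete A-MSDU) and advance head_seq_num.
 */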
static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

no_frame:
	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

  971. /*
  972. * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
  973. * the skb was added to the buffer longer than this time ago, the earlier
  974. * frames that have not yet been received are assumed to be lost and the skb
  975. * can be released for processing. This may also release other skb's from the
  976. * reorder buffer if there are no additional gaps between the frames.
  977. *
  978. * Callers must hold tid_agg_rx->reorder_lock.
  979. */
  980. #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
  981. static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
  982. struct tid_ampdu_rx *tid_agg_rx,
  983. struct sk_buff_head *frames)
  984. {
  985. int index, i, j;
  986. lockdep_assert_held(&tid_agg_rx->reorder_lock);
  987. /* release the buffer until next missing frame */
  988. index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
  989. if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
  990. tid_agg_rx->stored_mpdu_num) {
  991. /*
  992. * No buffers ready to be released, but check whether any
  993. * frames in the reorder buffer have timed out.
  994. */
  995. int skipped = 1;
  996. for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
  997. j = (j + 1) % tid_agg_rx->buf_size) {
  998. if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
  999. skipped++;
  1000. continue;
  1001. }
  1002. if (skipped &&
  1003. !time_after(jiffies, tid_agg_rx->reorder_time[j] +
  1004. HT_RX_REORDER_BUF_TIMEOUT))
  1005. goto set_release_timer;
  1006. /* don't leave incomplete A-MSDUs around */
  1007. for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
  1008. i = (i + 1) % tid_agg_rx->buf_size)
  1009. __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
  1010. ht_dbg_ratelimited(sdata,
  1011. "release an RX reorder frame due to timeout on earlier frames\n");
  1012. ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
  1013. frames);
  1014. /*
  1015. * Increment the head seq# also for the skipped slots.
  1016. */
  1017. tid_agg_rx->head_seq_num =
  1018. (tid_agg_rx->head_seq_num +
  1019. skipped) & IEEE80211_SN_MASK;
  1020. skipped = 0;
  1021. }
  1022. } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
  1023. ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
  1024. frames);
  1025. index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
  1026. }
  1027. if (tid_agg_rx->stored_mpdu_num) {
  1028. j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
  1029. for (; j != (index - 1) % tid_agg_rx->buf_size;
  1030. j = (j + 1) % tid_agg_rx->buf_size) {
  1031. if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
  1032. break;
  1033. }
  1034. set_release_timer:
  1035. if (!tid_agg_rx->removed)
  1036. mod_timer(&tid_agg_rx->reorder_timer,
  1037. tid_agg_rx->reorder_time[j] + 1 +
  1038. HT_RX_REORDER_BUF_TIMEOUT);
  1039. } else {
  1040. del_timer(&tid_agg_rx->reorder_timer);
  1041. }
  1042. }
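/*
* Two release strategies are combined above: when the slot at the
* current head is ready, in-order frames are released one by one until
* the next gap. When the head slot is still missing but later frames
* are buffered, the ring is scanned for the first ready slot whose
* frame has waited longer than HT_RX_REORDER_BUF_TIMEOUT; that frame
* is released, incomplete A-MSDUs before it are purged and
* head_seq_num jumps past the skipped slots. Finally, the
* reorder_timer is re-armed if anything is still buffered and deleted
* otherwise.
*/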
  1043. /*
  1044. * As this function belongs to the RX path it must be under
  1045. * rcu_read_lock protection. It returns false if the frame
  1046. * can be processed immediately, true if it was consumed.
  1047. */
  1048. static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
  1049. struct tid_ampdu_rx *tid_agg_rx,
  1050. struct sk_buff *skb,
  1051. struct sk_buff_head *frames)
  1052. {
  1053. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1054. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  1055. u16 sc = le16_to_cpu(hdr->seq_ctrl);
  1056. u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
  1057. u16 head_seq_num, buf_size;
  1058. int index;
  1059. bool ret = true;
  1060. spin_lock(&tid_agg_rx->reorder_lock);
  1061. /*
  1062. * Offloaded BA sessions have no known starting sequence number so pick
1063. * one from the first frame received for this TID after the BA session started.
  1064. */
  1065. if (unlikely(tid_agg_rx->auto_seq)) {
  1066. tid_agg_rx->auto_seq = false;
  1067. tid_agg_rx->ssn = mpdu_seq_num;
  1068. tid_agg_rx->head_seq_num = mpdu_seq_num;
  1069. }
  1070. buf_size = tid_agg_rx->buf_size;
  1071. head_seq_num = tid_agg_rx->head_seq_num;
  1072. /*
  1073. * If the current MPDU's SN is smaller than the SSN, it shouldn't
  1074. * be reordered.
  1075. */
  1076. if (unlikely(!tid_agg_rx->started)) {
  1077. if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
  1078. ret = false;
  1079. goto out;
  1080. }
  1081. tid_agg_rx->started = true;
  1082. }
  1083. /* frame with out of date sequence number */
  1084. if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
  1085. dev_kfree_skb(skb);
  1086. goto out;
  1087. }
  1088. /*
1089. * If the frame sequence number exceeds our buffering window
1090. * size, release some previous frames to make room for this one.
  1091. */
  1092. if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
  1093. head_seq_num = ieee80211_sn_inc(
  1094. ieee80211_sn_sub(mpdu_seq_num, buf_size));
  1095. /* release stored frames up to new head to stack */
  1096. ieee80211_release_reorder_frames(sdata, tid_agg_rx,
  1097. head_seq_num, frames);
  1098. }
  1099. /* Now the new frame is always in the range of the reordering buffer */
  1100. index = mpdu_seq_num % tid_agg_rx->buf_size;
  1101. /* check if we already stored this frame */
  1102. if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
  1103. dev_kfree_skb(skb);
  1104. goto out;
  1105. }
  1106. /*
  1107. * If the current MPDU is in the right order and nothing else
  1108. * is stored we can process it directly, no need to buffer it.
  1109. * If it is first but there's something stored, we may be able
  1110. * to release frames after this one.
  1111. */
  1112. if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
  1113. tid_agg_rx->stored_mpdu_num == 0) {
  1114. if (!(status->flag & RX_FLAG_AMSDU_MORE))
  1115. tid_agg_rx->head_seq_num =
  1116. ieee80211_sn_inc(tid_agg_rx->head_seq_num);
  1117. ret = false;
  1118. goto out;
  1119. }
  1120. /* put the frame in the reordering buffer */
  1121. __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
  1122. if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
  1123. tid_agg_rx->reorder_time[index] = jiffies;
  1124. tid_agg_rx->stored_mpdu_num++;
  1125. ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
  1126. }
  1127. out:
  1128. spin_unlock(&tid_agg_rx->reorder_lock);
  1129. return ret;
  1130. }
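/*
* The window handling above can be illustrated with a small example,
* assuming buf_size == 64 and head_seq_num == 100: an MPDU with
* sequence number 170 lies outside the window [100, 163], so the head
* is moved to ieee80211_sn_inc(ieee80211_sn_sub(170, 64)) == 107,
* slots 100..106 are flushed to the frames list first, and the new
* MPDU is then stored in slot 170 % 64 == 42 of the ring buffer.
*/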
  1131. /*
1132. * Reorder MPDUs from A-MPDUs, keeping them in the reorder buffer.
1133. * MPDUs that can be processed right away are added to the frames list.
  1134. */
  1135. static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
  1136. struct sk_buff_head *frames)
  1137. {
  1138. struct sk_buff *skb = rx->skb;
  1139. struct ieee80211_local *local = rx->local;
  1140. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1141. struct sta_info *sta = rx->sta;
  1142. struct tid_ampdu_rx *tid_agg_rx;
  1143. u16 sc;
  1144. u8 tid, ack_policy;
  1145. if (!ieee80211_is_data_qos(hdr->frame_control) ||
  1146. is_multicast_ether_addr(hdr->addr1))
  1147. goto dont_reorder;
  1148. /*
  1149. * filter the QoS data rx stream according to
  1150. * STA/TID and check if this STA/TID is on aggregation
  1151. */
  1152. if (!sta)
  1153. goto dont_reorder;
  1154. ack_policy = *ieee80211_get_qos_ctl(hdr) &
  1155. IEEE80211_QOS_CTL_ACK_POLICY_MASK;
  1156. tid = ieee80211_get_tid(hdr);
  1157. tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
  1158. if (!tid_agg_rx) {
  1159. if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
  1160. !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
  1161. !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
  1162. ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
  1163. WLAN_BACK_RECIPIENT,
  1164. WLAN_REASON_QSTA_REQUIRE_SETUP);
  1165. goto dont_reorder;
  1166. }
  1167. /* qos null data frames are excluded */
  1168. if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
  1169. goto dont_reorder;
  1170. /* not part of a BA session */
  1171. if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
  1172. ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
  1173. goto dont_reorder;
  1174. /* new, potentially un-ordered, ampdu frame - process it */
  1175. /* reset session timer */
  1176. if (tid_agg_rx->timeout)
  1177. tid_agg_rx->last_rx = jiffies;
  1178. /* if this mpdu is fragmented - terminate rx aggregation session */
  1179. sc = le16_to_cpu(hdr->seq_ctrl);
  1180. if (sc & IEEE80211_SCTL_FRAG) {
  1181. skb_queue_tail(&rx->sdata->skb_queue, skb);
  1182. ieee80211_queue_work(&local->hw, &rx->sdata->work);
  1183. return;
  1184. }
  1185. /*
  1186. * No locking needed -- we will only ever process one
  1187. * RX packet at a time, and thus own tid_agg_rx. All
  1188. * other code manipulating it needs to (and does) make
  1189. * sure that we cannot get to it any more before doing
  1190. * anything with it.
  1191. */
  1192. if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
  1193. frames))
  1194. return;
  1195. dont_reorder:
  1196. __skb_queue_tail(frames, skb);
  1197. }
  1198. static ieee80211_rx_result debug_noinline
  1199. ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
  1200. {
  1201. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  1202. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  1203. if (status->flag & RX_FLAG_DUP_VALIDATED)
  1204. return RX_CONTINUE;
  1205. /*
  1206. * Drop duplicate 802.11 retransmissions
  1207. * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
  1208. */
  1209. if (rx->skb->len < 24)
  1210. return RX_CONTINUE;
  1211. if (ieee80211_is_ctl(hdr->frame_control) ||
  1212. ieee80211_is_any_nullfunc(hdr->frame_control) ||
  1213. is_multicast_ether_addr(hdr->addr1))
  1214. return RX_CONTINUE;
  1215. if (!rx->sta)
  1216. return RX_CONTINUE;
  1217. if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
  1218. rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
  1219. I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
  1220. rx->sta->rx_stats.num_duplicates++;
  1221. return RX_DROP_UNUSABLE;
  1222. } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
  1223. rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
  1224. }
  1225. return RX_CONTINUE;
  1226. }
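/*
* Duplicate detection above keys on the full sequence-control field
* (sequence plus fragment number): a frame is only dropped when its
* retry bit is set and seq_ctrl matches the value last recorded for
* this STA and seqno_idx. last_seq_ctrl is deliberately not updated
* for subframes flagged RX_FLAG_AMSDU_MORE, so that a retransmission
* of a partially delivered A-MSDU is not misclassified as a duplicate.
*/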
  1227. static ieee80211_rx_result debug_noinline
  1228. ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
  1229. {
  1230. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  1231. /* Drop disallowed frame classes based on STA auth/assoc state;
  1232. * IEEE 802.11, Chap 5.5.
  1233. *
  1234. * mac80211 filters only based on association state, i.e. it drops
  1235. * Class 3 frames from not associated stations. hostapd sends
  1236. * deauth/disassoc frames when needed. In addition, hostapd is
  1237. * responsible for filtering on both auth and assoc states.
  1238. */
  1239. if (ieee80211_vif_is_mesh(&rx->sdata->vif))
  1240. return ieee80211_rx_mesh_check(rx);
  1241. if (unlikely((ieee80211_is_data(hdr->frame_control) ||
  1242. ieee80211_is_pspoll(hdr->frame_control)) &&
  1243. rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
  1244. rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
  1245. rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
  1246. (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
  1247. /*
  1248. * accept port control frames from the AP even when it's not
  1249. * yet marked ASSOC to prevent a race where we don't set the
  1250. * assoc bit quickly enough before it sends the first frame
  1251. */
  1252. if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
  1253. ieee80211_is_data_present(hdr->frame_control)) {
  1254. unsigned int hdrlen;
  1255. __be16 ethertype;
  1256. hdrlen = ieee80211_hdrlen(hdr->frame_control);
  1257. if (rx->skb->len < hdrlen + 8)
  1258. return RX_DROP_MONITOR;
  1259. skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
  1260. if (ethertype == rx->sdata->control_port_protocol)
  1261. return RX_CONTINUE;
  1262. }
  1263. if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
  1264. cfg80211_rx_spurious_frame(rx->sdata->dev,
  1265. hdr->addr2,
  1266. GFP_ATOMIC))
  1267. return RX_DROP_UNUSABLE;
  1268. return RX_DROP_MONITOR;
  1269. }
  1270. return RX_CONTINUE;
  1271. }
  1272. static ieee80211_rx_result debug_noinline
  1273. ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
  1274. {
  1275. struct ieee80211_local *local;
  1276. struct ieee80211_hdr *hdr;
  1277. struct sk_buff *skb;
  1278. local = rx->local;
  1279. skb = rx->skb;
  1280. hdr = (struct ieee80211_hdr *) skb->data;
  1281. if (!local->pspolling)
  1282. return RX_CONTINUE;
  1283. if (!ieee80211_has_fromds(hdr->frame_control))
  1284. /* this is not from AP */
  1285. return RX_CONTINUE;
  1286. if (!ieee80211_is_data(hdr->frame_control))
  1287. return RX_CONTINUE;
  1288. if (!ieee80211_has_moredata(hdr->frame_control)) {
  1289. /* AP has no more frames buffered for us */
  1290. local->pspolling = false;
  1291. return RX_CONTINUE;
  1292. }
  1293. /* more data bit is set, let's request a new frame from the AP */
  1294. ieee80211_send_pspoll(local, rx->sdata);
  1295. return RX_CONTINUE;
  1296. }
  1297. static void sta_ps_start(struct sta_info *sta)
  1298. {
  1299. struct ieee80211_sub_if_data *sdata = sta->sdata;
  1300. struct ieee80211_local *local = sdata->local;
  1301. struct ps_data *ps;
  1302. int tid;
  1303. if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
  1304. sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
  1305. ps = &sdata->bss->ps;
  1306. else
  1307. return;
  1308. atomic_inc(&ps->num_sta_ps);
  1309. set_sta_flag(sta, WLAN_STA_PS_STA);
  1310. if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
  1311. drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
  1312. ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
  1313. sta->sta.addr, sta->sta.aid);
  1314. ieee80211_clear_fast_xmit(sta);
  1315. if (!sta->sta.txq[0])
  1316. return;
  1317. for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
  1318. struct ieee80211_txq *txq = sta->sta.txq[tid];
  1319. struct txq_info *txqi = to_txq_info(txq);
  1320. spin_lock(&local->active_txq_lock[txq->ac]);
  1321. if (!list_empty(&txqi->schedule_order))
  1322. list_del_init(&txqi->schedule_order);
  1323. spin_unlock(&local->active_txq_lock[txq->ac]);
  1324. if (txq_has_queue(txq))
  1325. set_bit(tid, &sta->txq_buffered_tids);
  1326. else
  1327. clear_bit(tid, &sta->txq_buffered_tids);
  1328. }
  1329. }
  1330. static void sta_ps_end(struct sta_info *sta)
  1331. {
  1332. ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
  1333. sta->sta.addr, sta->sta.aid);
  1334. if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
  1335. /*
  1336. * Clear the flag only if the other one is still set
  1337. * so that the TX path won't start TX'ing new frames
  1338. * directly ... In the case that the driver flag isn't
  1339. * set ieee80211_sta_ps_deliver_wakeup() will clear it.
  1340. */
  1341. clear_sta_flag(sta, WLAN_STA_PS_STA);
  1342. ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
  1343. sta->sta.addr, sta->sta.aid);
  1344. return;
  1345. }
  1346. set_sta_flag(sta, WLAN_STA_PS_DELIVER);
  1347. clear_sta_flag(sta, WLAN_STA_PS_STA);
  1348. ieee80211_sta_ps_deliver_wakeup(sta);
  1349. }
  1350. int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
  1351. {
  1352. struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
  1353. bool in_ps;
  1354. WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
  1355. /* Don't let the same PS state be set twice */
  1356. in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
  1357. if ((start && in_ps) || (!start && !in_ps))
  1358. return -EINVAL;
  1359. if (start)
  1360. sta_ps_start(sta);
  1361. else
  1362. sta_ps_end(sta);
  1363. return 0;
  1364. }
  1365. EXPORT_SYMBOL(ieee80211_sta_ps_transition);
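/*
* A minimal sketch of how a driver using AP_LINK_PS offload might
* report powersave transitions through this helper; the wrapper and
* the ps_on flag are hypothetical, only ieee80211_sta_ps_transition()
* itself is the exported API:
*
*	static void drv_report_ps_bit(struct ieee80211_sta *sta, bool ps_on)
*	{
*		if (ieee80211_sta_ps_transition(sta, ps_on) == -EINVAL)
*			return;	// station was already in the requested state
*	}
*
* The -EINVAL return simply signals that the requested state equals
* the current one; callers are expected to tolerate it.
*/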
  1366. void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
  1367. {
  1368. struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
  1369. if (test_sta_flag(sta, WLAN_STA_SP))
  1370. return;
  1371. if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
  1372. ieee80211_sta_ps_deliver_poll_response(sta);
  1373. else
  1374. set_sta_flag(sta, WLAN_STA_PSPOLL);
  1375. }
  1376. EXPORT_SYMBOL(ieee80211_sta_pspoll);
  1377. void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
  1378. {
  1379. struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
  1380. int ac = ieee80211_ac_from_tid(tid);
  1381. /*
  1382. * If this AC is not trigger-enabled do nothing unless the
  1383. * driver is calling us after it already checked.
  1384. *
  1385. * NB: This could/should check a separate bitmap of trigger-
  1386. * enabled queues, but for now we only implement uAPSD w/o
  1387. * TSPEC changes to the ACs, so they're always the same.
  1388. */
  1389. if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
  1390. tid != IEEE80211_NUM_TIDS)
  1391. return;
  1392. /* if we are in a service period, do nothing */
  1393. if (test_sta_flag(sta, WLAN_STA_SP))
  1394. return;
  1395. if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
  1396. ieee80211_sta_ps_deliver_uapsd(sta);
  1397. else
  1398. set_sta_flag(sta, WLAN_STA_UAPSD);
  1399. }
  1400. EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
  1401. static ieee80211_rx_result debug_noinline
  1402. ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
  1403. {
  1404. struct ieee80211_sub_if_data *sdata = rx->sdata;
  1405. struct ieee80211_hdr *hdr = (void *)rx->skb->data;
  1406. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  1407. if (!rx->sta)
  1408. return RX_CONTINUE;
  1409. if (sdata->vif.type != NL80211_IFTYPE_AP &&
  1410. sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
  1411. return RX_CONTINUE;
  1412. /*
  1413. * The device handles station powersave, so don't do anything about
1414. * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1415. * to mac80211 since they're handled by the device).
  1416. */
  1417. if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
  1418. return RX_CONTINUE;
  1419. /*
  1420. * Don't do anything if the station isn't already asleep. In
  1421. * the uAPSD case, the station will probably be marked asleep,
  1422. * in the PS-Poll case the station must be confused ...
  1423. */
  1424. if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
  1425. return RX_CONTINUE;
  1426. if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
  1427. ieee80211_sta_pspoll(&rx->sta->sta);
  1428. /* Free PS Poll skb here instead of returning RX_DROP that would
1429. * count as a dropped frame. */
  1430. dev_kfree_skb(rx->skb);
  1431. return RX_QUEUED;
  1432. } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
  1433. !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
  1434. ieee80211_has_pm(hdr->frame_control) &&
  1435. (ieee80211_is_data_qos(hdr->frame_control) ||
  1436. ieee80211_is_qos_nullfunc(hdr->frame_control))) {
  1437. u8 tid = ieee80211_get_tid(hdr);
  1438. ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
  1439. }
  1440. return RX_CONTINUE;
  1441. }
  1442. static ieee80211_rx_result debug_noinline
  1443. ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
  1444. {
  1445. struct sta_info *sta = rx->sta;
  1446. struct sk_buff *skb = rx->skb;
  1447. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  1448. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1449. int i;
  1450. if (!sta)
  1451. return RX_CONTINUE;
  1452. /*
  1453. * Update last_rx only for IBSS packets which are for the current
1454. * BSSID and for stations already AUTHORIZED to avoid keeping the
  1455. * current IBSS network alive in cases where other STAs start
  1456. * using different BSSID. This will also give the station another
  1457. * chance to restart the authentication/authorization in case
  1458. * something went wrong the first time.
  1459. */
  1460. if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
  1461. u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
  1462. NL80211_IFTYPE_ADHOC);
  1463. if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
  1464. test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
  1465. sta->rx_stats.last_rx = jiffies;
  1466. if (ieee80211_is_data(hdr->frame_control) &&
  1467. !is_multicast_ether_addr(hdr->addr1))
  1468. sta->rx_stats.last_rate =
  1469. sta_stats_encode_rate(status);
  1470. }
  1471. } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
  1472. sta->rx_stats.last_rx = jiffies;
  1473. } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
  1474. !is_multicast_ether_addr(hdr->addr1)) {
  1475. /*
1476. * Mesh beacons will update last_rx if they are found to
  1477. * match the current local configuration when processed.
  1478. */
  1479. sta->rx_stats.last_rx = jiffies;
  1480. if (ieee80211_is_data(hdr->frame_control))
  1481. sta->rx_stats.last_rate = sta_stats_encode_rate(status);
  1482. }
  1483. sta->rx_stats.fragments++;
  1484. u64_stats_update_begin(&rx->sta->rx_stats.syncp);
  1485. sta->rx_stats.bytes += rx->skb->len;
  1486. u64_stats_update_end(&rx->sta->rx_stats.syncp);
  1487. if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
  1488. sta->rx_stats.last_signal = status->signal;
  1489. ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
  1490. }
  1491. if (status->chains) {
  1492. sta->rx_stats.chains = status->chains;
  1493. for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
  1494. int signal = status->chain_signal[i];
  1495. if (!(status->chains & BIT(i)))
  1496. continue;
  1497. sta->rx_stats.chain_signal_last[i] = signal;
  1498. ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
  1499. -signal);
  1500. }
  1501. }
  1502. if (ieee80211_is_s1g_beacon(hdr->frame_control))
  1503. return RX_CONTINUE;
  1504. /*
  1505. * Change STA power saving mode only at the end of a frame
  1506. * exchange sequence, and only for a data or management
  1507. * frame as specified in IEEE 802.11-2016 11.2.3.2
  1508. */
  1509. if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
  1510. !ieee80211_has_morefrags(hdr->frame_control) &&
  1511. !is_multicast_ether_addr(hdr->addr1) &&
  1512. (ieee80211_is_mgmt(hdr->frame_control) ||
  1513. ieee80211_is_data(hdr->frame_control)) &&
  1514. !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
  1515. (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
  1516. rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
  1517. if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
  1518. if (!ieee80211_has_pm(hdr->frame_control))
  1519. sta_ps_end(sta);
  1520. } else {
  1521. if (ieee80211_has_pm(hdr->frame_control))
  1522. sta_ps_start(sta);
  1523. }
  1524. }
  1525. /* mesh power save support */
  1526. if (ieee80211_vif_is_mesh(&rx->sdata->vif))
  1527. ieee80211_mps_rx_h_sta_process(sta, hdr);
  1528. /*
  1529. * Drop (qos-)data::nullfunc frames silently, since they
  1530. * are used only to control station power saving mode.
  1531. */
  1532. if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
  1533. I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
  1534. /*
1535. * If we receive a 4-addr nullfunc frame from a STA
1536. * that has not been moved to a 4-addr STA VLAN yet, send
1537. * the event to userspace and, for older hostapd, drop
1538. * the frame to the monitor interface.
  1539. */
  1540. if (ieee80211_has_a4(hdr->frame_control) &&
  1541. (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
  1542. (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
  1543. !rx->sdata->u.vlan.sta))) {
  1544. if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
  1545. cfg80211_rx_unexpected_4addr_frame(
  1546. rx->sdata->dev, sta->sta.addr,
  1547. GFP_ATOMIC);
  1548. return RX_DROP_MONITOR;
  1549. }
  1550. /*
  1551. * Update counter and free packet here to avoid
1552. * counting this as a dropped packet.
  1553. */
  1554. sta->rx_stats.packets++;
  1555. dev_kfree_skb(rx->skb);
  1556. return RX_QUEUED;
  1557. }
  1558. return RX_CONTINUE;
  1559. } /* ieee80211_rx_h_sta_process */
  1560. static struct ieee80211_key *
  1561. ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
  1562. {
  1563. struct ieee80211_key *key = NULL;
  1564. struct ieee80211_sub_if_data *sdata = rx->sdata;
  1565. int idx2;
  1566. /* Make sure key gets set if either BIGTK key index is set so that
  1567. * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
  1568. * Beacon frames and Beacon frames that claim to use another BIGTK key
  1569. * index (i.e., a key that we do not have).
  1570. */
  1571. if (idx < 0) {
  1572. idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
  1573. idx2 = idx + 1;
  1574. } else {
  1575. if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
  1576. idx2 = idx + 1;
  1577. else
  1578. idx2 = idx - 1;
  1579. }
  1580. if (rx->sta)
  1581. key = rcu_dereference(rx->sta->gtk[idx]);
  1582. if (!key)
  1583. key = rcu_dereference(sdata->keys[idx]);
  1584. if (!key && rx->sta)
  1585. key = rcu_dereference(rx->sta->gtk[idx2]);
  1586. if (!key)
  1587. key = rcu_dereference(sdata->keys[idx2]);
  1588. return key;
  1589. }
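/*
* With the usual mac80211 key-space layout (NUM_DEFAULT_KEYS == 4,
* NUM_DEFAULT_MGMT_KEYS == 2) the two BIGTK slots are indices 6 and 7,
* so the helper above probes that pair: both of them when called with
* idx < 0, or the requested index plus its sibling otherwise, checking
* the per-STA gtk[] entry before the per-sdata keys[] entry in each
* case.
*/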
  1590. static ieee80211_rx_result debug_noinline
  1591. ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
  1592. {
  1593. struct sk_buff *skb = rx->skb;
  1594. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  1595. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1596. int keyidx;
  1597. ieee80211_rx_result result = RX_DROP_UNUSABLE;
  1598. struct ieee80211_key *sta_ptk = NULL;
  1599. struct ieee80211_key *ptk_idx = NULL;
  1600. int mmie_keyidx = -1;
  1601. __le16 fc;
  1602. const struct ieee80211_cipher_scheme *cs = NULL;
  1603. if (ieee80211_is_ext(hdr->frame_control))
  1604. return RX_CONTINUE;
  1605. /*
  1606. * Key selection 101
  1607. *
  1608. * There are five types of keys:
  1609. * - GTK (group keys)
  1610. * - IGTK (group keys for management frames)
  1611. * - BIGTK (group keys for Beacon frames)
  1612. * - PTK (pairwise keys)
  1613. * - STK (station-to-station pairwise keys)
  1614. *
  1615. * When selecting a key, we have to distinguish between multicast
  1616. * (including broadcast) and unicast frames, the latter can only
  1617. * use PTKs and STKs while the former always use GTKs, IGTKs, and
  1618. * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
  1619. * then unicast frames can also use key indices like GTKs. Hence, if we
  1620. * don't have a PTK/STK we check the key index for a WEP key.
  1621. *
  1622. * Note that in a regular BSS, multicast frames are sent by the
  1623. * AP only, associated stations unicast the frame to the AP first
  1624. * which then multicasts it on their behalf.
  1625. *
  1626. * There is also a slight problem in IBSS mode: GTKs are negotiated
  1627. * with each station, that is something we don't currently handle.
  1628. * The spec seems to expect that one negotiates the same key with
  1629. * every station but there's no such requirement; VLANs could be
  1630. * possible.
  1631. */
  1632. /* start without a key */
  1633. rx->key = NULL;
  1634. fc = hdr->frame_control;
  1635. if (rx->sta) {
  1636. int keyid = rx->sta->ptk_idx;
  1637. sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
  1638. if (ieee80211_has_protected(fc) &&
  1639. !(status->flag & RX_FLAG_IV_STRIPPED)) {
  1640. cs = rx->sta->cipher_scheme;
  1641. keyid = ieee80211_get_keyid(rx->skb, cs);
  1642. if (unlikely(keyid < 0))
  1643. return RX_DROP_UNUSABLE;
  1644. ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
  1645. }
  1646. }
  1647. if (!ieee80211_has_protected(fc))
  1648. mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
  1649. if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
  1650. rx->key = ptk_idx ? ptk_idx : sta_ptk;
  1651. if ((status->flag & RX_FLAG_DECRYPTED) &&
  1652. (status->flag & RX_FLAG_IV_STRIPPED))
  1653. return RX_CONTINUE;
  1654. /* Skip decryption if the frame is not protected. */
  1655. if (!ieee80211_has_protected(fc))
  1656. return RX_CONTINUE;
  1657. } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
  1658. /* Broadcast/multicast robust management frame / BIP */
  1659. if ((status->flag & RX_FLAG_DECRYPTED) &&
  1660. (status->flag & RX_FLAG_IV_STRIPPED))
  1661. return RX_CONTINUE;
  1662. if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
  1663. mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
  1664. NUM_DEFAULT_BEACON_KEYS) {
  1665. cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
  1666. skb->data,
  1667. skb->len);
  1668. return RX_DROP_MONITOR; /* unexpected BIP keyidx */
  1669. }
  1670. rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
  1671. if (!rx->key)
  1672. return RX_CONTINUE; /* Beacon protection not in use */
  1673. } else if (mmie_keyidx >= 0) {
  1674. /* Broadcast/multicast robust management frame / BIP */
  1675. if ((status->flag & RX_FLAG_DECRYPTED) &&
  1676. (status->flag & RX_FLAG_IV_STRIPPED))
  1677. return RX_CONTINUE;
  1678. if (mmie_keyidx < NUM_DEFAULT_KEYS ||
  1679. mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
  1680. return RX_DROP_MONITOR; /* unexpected BIP keyidx */
  1681. if (rx->sta) {
  1682. if (ieee80211_is_group_privacy_action(skb) &&
  1683. test_sta_flag(rx->sta, WLAN_STA_MFP))
  1684. return RX_DROP_MONITOR;
  1685. rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
  1686. }
  1687. if (!rx->key)
  1688. rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
  1689. } else if (!ieee80211_has_protected(fc)) {
  1690. /*
  1691. * The frame was not protected, so skip decryption. However, we
  1692. * need to set rx->key if there is a key that could have been
  1693. * used so that the frame may be dropped if encryption would
  1694. * have been expected.
  1695. */
  1696. struct ieee80211_key *key = NULL;
  1697. struct ieee80211_sub_if_data *sdata = rx->sdata;
  1698. int i;
  1699. if (ieee80211_is_beacon(fc)) {
  1700. key = ieee80211_rx_get_bigtk(rx, -1);
  1701. } else if (ieee80211_is_mgmt(fc) &&
  1702. is_multicast_ether_addr(hdr->addr1)) {
  1703. key = rcu_dereference(rx->sdata->default_mgmt_key);
  1704. } else {
  1705. if (rx->sta) {
  1706. for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
  1707. key = rcu_dereference(rx->sta->gtk[i]);
  1708. if (key)
  1709. break;
  1710. }
  1711. }
  1712. if (!key) {
  1713. for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
  1714. key = rcu_dereference(sdata->keys[i]);
  1715. if (key)
  1716. break;
  1717. }
  1718. }
  1719. }
  1720. if (key)
  1721. rx->key = key;
  1722. return RX_CONTINUE;
  1723. } else {
  1724. /*
  1725. * The device doesn't give us the IV so we won't be
  1726. * able to look up the key. That's ok though, we
  1727. * don't need to decrypt the frame, we just won't
  1728. * be able to keep statistics accurate.
  1729. * Except for key threshold notifications, should
  1730. * we somehow allow the driver to tell us which key
  1731. * the hardware used if this flag is set?
  1732. */
  1733. if ((status->flag & RX_FLAG_DECRYPTED) &&
  1734. (status->flag & RX_FLAG_IV_STRIPPED))
  1735. return RX_CONTINUE;
  1736. keyidx = ieee80211_get_keyid(rx->skb, cs);
  1737. if (unlikely(keyidx < 0))
  1738. return RX_DROP_UNUSABLE;
  1739. /* check per-station GTK first, if multicast packet */
  1740. if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
  1741. rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
  1742. /* if not found, try default key */
  1743. if (!rx->key) {
  1744. rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
  1745. /*
  1746. * RSNA-protected unicast frames should always be
  1747. * sent with pairwise or station-to-station keys,
  1748. * but for WEP we allow using a key index as well.
  1749. */
  1750. if (rx->key &&
  1751. rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
  1752. rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
  1753. !is_multicast_ether_addr(hdr->addr1))
  1754. rx->key = NULL;
  1755. }
  1756. }
  1757. if (rx->key) {
  1758. if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
  1759. return RX_DROP_MONITOR;
  1760. /* TODO: add threshold stuff again */
  1761. } else {
  1762. return RX_DROP_MONITOR;
  1763. }
  1764. switch (rx->key->conf.cipher) {
  1765. case WLAN_CIPHER_SUITE_WEP40:
  1766. case WLAN_CIPHER_SUITE_WEP104:
  1767. result = ieee80211_crypto_wep_decrypt(rx);
  1768. break;
  1769. case WLAN_CIPHER_SUITE_TKIP:
  1770. result = ieee80211_crypto_tkip_decrypt(rx);
  1771. break;
  1772. case WLAN_CIPHER_SUITE_CCMP:
  1773. result = ieee80211_crypto_ccmp_decrypt(
  1774. rx, IEEE80211_CCMP_MIC_LEN);
  1775. break;
  1776. case WLAN_CIPHER_SUITE_CCMP_256:
  1777. result = ieee80211_crypto_ccmp_decrypt(
  1778. rx, IEEE80211_CCMP_256_MIC_LEN);
  1779. break;
  1780. case WLAN_CIPHER_SUITE_AES_CMAC:
  1781. result = ieee80211_crypto_aes_cmac_decrypt(rx);
  1782. break;
  1783. case WLAN_CIPHER_SUITE_BIP_CMAC_256:
  1784. result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
  1785. break;
  1786. case WLAN_CIPHER_SUITE_BIP_GMAC_128:
  1787. case WLAN_CIPHER_SUITE_BIP_GMAC_256:
  1788. result = ieee80211_crypto_aes_gmac_decrypt(rx);
  1789. break;
  1790. case WLAN_CIPHER_SUITE_GCMP:
  1791. case WLAN_CIPHER_SUITE_GCMP_256:
  1792. result = ieee80211_crypto_gcmp_decrypt(rx);
  1793. break;
  1794. default:
  1795. result = ieee80211_crypto_hw_decrypt(rx);
  1796. }
  1797. /* the hdr variable is invalid after the decrypt handlers */
  1798. /* either the frame has been decrypted or will be dropped */
  1799. status->flag |= RX_FLAG_DECRYPTED;
  1800. if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE))
  1801. cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
  1802. skb->data, skb->len);
  1803. return result;
  1804. }
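/*
* Summarizing the selection above: protected unicast frames from a
* known STA use the PTK (with the key index taken from the frame when
* the IV is present), robust group-addressed management frames and
* protected beacons use the IGTK/BIGTK chosen via the MMIE key index,
* unprotected frames merely record a candidate key so that the later
* drop-unencrypted handlers can fire, and remaining protected frames
* fall back to the GTK selected by the key index in the frame. If no
* usable key is found the frame is dropped towards monitor interfaces
* instead of being decrypted.
*/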
  1805. void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
  1806. {
  1807. int i;
  1808. for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
  1809. skb_queue_head_init(&cache->entries[i].skb_list);
  1810. }
  1811. void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
  1812. {
  1813. int i;
  1814. for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
  1815. __skb_queue_purge(&cache->entries[i].skb_list);
  1816. }
  1817. static inline struct ieee80211_fragment_entry *
  1818. ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
  1819. unsigned int frag, unsigned int seq, int rx_queue,
  1820. struct sk_buff **skb)
  1821. {
  1822. struct ieee80211_fragment_entry *entry;
  1823. entry = &cache->entries[cache->next++];
  1824. if (cache->next >= IEEE80211_FRAGMENT_MAX)
  1825. cache->next = 0;
  1826. __skb_queue_purge(&entry->skb_list);
  1827. __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
  1828. *skb = NULL;
  1829. entry->first_frag_time = jiffies;
  1830. entry->seq = seq;
  1831. entry->rx_queue = rx_queue;
  1832. entry->last_frag = frag;
  1833. entry->check_sequential_pn = false;
  1834. entry->extra_len = 0;
  1835. return entry;
  1836. }
  1837. static inline struct ieee80211_fragment_entry *
  1838. ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
  1839. unsigned int frag, unsigned int seq,
  1840. int rx_queue, struct ieee80211_hdr *hdr)
  1841. {
  1842. struct ieee80211_fragment_entry *entry;
  1843. int i, idx;
  1844. idx = cache->next;
  1845. for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
  1846. struct ieee80211_hdr *f_hdr;
  1847. struct sk_buff *f_skb;
  1848. idx--;
  1849. if (idx < 0)
  1850. idx = IEEE80211_FRAGMENT_MAX - 1;
  1851. entry = &cache->entries[idx];
  1852. if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
  1853. entry->rx_queue != rx_queue ||
  1854. entry->last_frag + 1 != frag)
  1855. continue;
  1856. f_skb = __skb_peek(&entry->skb_list);
  1857. f_hdr = (struct ieee80211_hdr *) f_skb->data;
  1858. /*
  1859. * Check ftype and addresses are equal, else check next fragment
  1860. */
  1861. if (((hdr->frame_control ^ f_hdr->frame_control) &
  1862. cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
  1863. !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
  1864. !ether_addr_equal(hdr->addr2, f_hdr->addr2))
  1865. continue;
  1866. if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
  1867. __skb_queue_purge(&entry->skb_list);
  1868. continue;
  1869. }
  1870. return entry;
  1871. }
  1872. return NULL;
  1873. }
  1874. static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
  1875. {
  1876. return rx->key &&
  1877. (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
  1878. rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
  1879. rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
  1880. rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
  1881. ieee80211_has_protected(fc);
  1882. }
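/*
* Only CCMP and GCMP carry a packet number that must increment by
* exactly one between the fragments of a single MSDU, which is what
* makes the sequential-PN verification in the defragmentation handler
* below possible; TKIP and WEP fragments cannot be validated that way
* and are only covered by the key_color checks against mixed-key
* reassembly.
*/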
  1883. static ieee80211_rx_result debug_noinline
  1884. ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
  1885. {
  1886. struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
  1887. struct ieee80211_hdr *hdr;
  1888. u16 sc;
  1889. __le16 fc;
  1890. unsigned int frag, seq;
  1891. struct ieee80211_fragment_entry *entry;
  1892. struct sk_buff *skb;
  1893. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  1894. hdr = (struct ieee80211_hdr *)rx->skb->data;
  1895. fc = hdr->frame_control;
  1896. if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
  1897. return RX_CONTINUE;
  1898. sc = le16_to_cpu(hdr->seq_ctrl);
  1899. frag = sc & IEEE80211_SCTL_FRAG;
  1900. if (rx->sta)
  1901. cache = &rx->sta->frags;
  1902. if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
  1903. goto out;
  1904. if (is_multicast_ether_addr(hdr->addr1))
  1905. return RX_DROP_MONITOR;
  1906. I802_DEBUG_INC(rx->local->rx_handlers_fragments);
  1907. if (skb_linearize(rx->skb))
  1908. return RX_DROP_UNUSABLE;
  1909. /*
1910. * skb_linearize() might change skb->data, so previously
1911. * cached variables (in this case, hdr) need to
  1912. * be refreshed with the new data.
  1913. */
  1914. hdr = (struct ieee80211_hdr *)rx->skb->data;
  1915. seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
  1916. if (frag == 0) {
  1917. /* This is the first fragment of a new frame. */
  1918. entry = ieee80211_reassemble_add(cache, frag, seq,
  1919. rx->seqno_idx, &(rx->skb));
  1920. if (requires_sequential_pn(rx, fc)) {
  1921. int queue = rx->security_idx;
  1922. /* Store CCMP/GCMP PN so that we can verify that the
  1923. * next fragment has a sequential PN value.
  1924. */
  1925. entry->check_sequential_pn = true;
  1926. entry->is_protected = true;
  1927. entry->key_color = rx->key->color;
  1928. memcpy(entry->last_pn,
  1929. rx->key->u.ccmp.rx_pn[queue],
  1930. IEEE80211_CCMP_PN_LEN);
  1931. BUILD_BUG_ON(offsetof(struct ieee80211_key,
  1932. u.ccmp.rx_pn) !=
  1933. offsetof(struct ieee80211_key,
  1934. u.gcmp.rx_pn));
  1935. BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
  1936. sizeof(rx->key->u.gcmp.rx_pn[queue]));
  1937. BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
  1938. IEEE80211_GCMP_PN_LEN);
  1939. } else if (rx->key &&
  1940. (ieee80211_has_protected(fc) ||
  1941. (status->flag & RX_FLAG_DECRYPTED))) {
  1942. entry->is_protected = true;
  1943. entry->key_color = rx->key->color;
  1944. }
  1945. return RX_QUEUED;
  1946. }
  1947. /* This is a fragment for a frame that should already be pending in
1948. * the fragment cache. Add this fragment to the end of the pending entry.
  1949. */
  1950. entry = ieee80211_reassemble_find(cache, frag, seq,
  1951. rx->seqno_idx, hdr);
  1952. if (!entry) {
  1953. I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
  1954. return RX_DROP_MONITOR;
  1955. }
  1956. /* "The receiver shall discard MSDUs and MMPDUs whose constituent
  1957. * MPDU PN values are not incrementing in steps of 1."
  1958. * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
  1959. * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
  1960. */
  1961. if (entry->check_sequential_pn) {
  1962. int i;
  1963. u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
  1964. if (!requires_sequential_pn(rx, fc))
  1965. return RX_DROP_UNUSABLE;
  1966. /* Prevent mixed key and fragment cache attacks */
  1967. if (entry->key_color != rx->key->color)
  1968. return RX_DROP_UNUSABLE;
  1969. memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
  1970. for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
  1971. pn[i]++;
  1972. if (pn[i])
  1973. break;
  1974. }
  1975. rpn = rx->ccm_gcm.pn;
  1976. if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
  1977. return RX_DROP_UNUSABLE;
  1978. memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
  1979. } else if (entry->is_protected &&
  1980. (!rx->key ||
  1981. (!ieee80211_has_protected(fc) &&
  1982. !(status->flag & RX_FLAG_DECRYPTED)) ||
  1983. rx->key->color != entry->key_color)) {
1984. /* Drop this as a mixed key or fragment cache attack, even
1985. * though for TKIP the Michael MIC should protect us, and WEP is a
1986. * lost cause anyway.
  1987. */
  1988. return RX_DROP_UNUSABLE;
  1989. } else if (entry->is_protected && rx->key &&
  1990. entry->key_color != rx->key->color &&
  1991. (status->flag & RX_FLAG_DECRYPTED)) {
  1992. return RX_DROP_UNUSABLE;
  1993. }
  1994. skb_pull(rx->skb, ieee80211_hdrlen(fc));
  1995. __skb_queue_tail(&entry->skb_list, rx->skb);
  1996. entry->last_frag = frag;
  1997. entry->extra_len += rx->skb->len;
  1998. if (ieee80211_has_morefrags(fc)) {
  1999. rx->skb = NULL;
  2000. return RX_QUEUED;
  2001. }
  2002. rx->skb = __skb_dequeue(&entry->skb_list);
  2003. if (skb_tailroom(rx->skb) < entry->extra_len) {
  2004. I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
  2005. if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
  2006. GFP_ATOMIC))) {
  2007. I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
  2008. __skb_queue_purge(&entry->skb_list);
  2009. return RX_DROP_UNUSABLE;
  2010. }
  2011. }
  2012. while ((skb = __skb_dequeue(&entry->skb_list))) {
  2013. skb_put_data(rx->skb, skb->data, skb->len);
  2014. dev_kfree_skb(skb);
  2015. }
  2016. out:
  2017. ieee80211_led_rx(rx->local);
  2018. if (rx->sta)
  2019. rx->sta->rx_stats.packets++;
  2020. return RX_CONTINUE;
  2021. }
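/*
* The PN comparison above treats last_pn as a big-endian counter: the
* increment loop starts at the least significant byte (index
* IEEE80211_CCMP_PN_LEN - 1) and only carries into the next byte when
* one wraps to zero. For example an expected PN of 00 00 00 00 01 ff
* becomes 00 00 00 00 02 00, and the fragment is accepted only if the
* PN recovered during decryption matches that value exactly.
*/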
  2022. static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
  2023. {
  2024. if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
  2025. return -EACCES;
  2026. return 0;
  2027. }
  2028. static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
  2029. {
  2030. struct ieee80211_hdr *hdr = (void *)rx->skb->data;
  2031. struct sk_buff *skb = rx->skb;
  2032. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  2033. /*
  2034. * Pass through unencrypted frames if the hardware has
  2035. * decrypted them already.
  2036. */
  2037. if (status->flag & RX_FLAG_DECRYPTED)
  2038. return 0;
  2039. /* check mesh EAPOL frames first */
  2040. if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
  2041. ieee80211_is_data(fc))) {
  2042. struct ieee80211s_hdr *mesh_hdr;
  2043. u16 hdr_len = ieee80211_hdrlen(fc);
  2044. u16 ethertype_offset;
  2045. __be16 ethertype;
  2046. if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
  2047. goto drop_check;
  2048. /* make sure fixed part of mesh header is there, also checks skb len */
  2049. if (!pskb_may_pull(rx->skb, hdr_len + 6))
  2050. goto drop_check;
  2051. mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
  2052. ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
  2053. sizeof(rfc1042_header);
  2054. if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
  2055. ethertype == rx->sdata->control_port_protocol)
  2056. return 0;
  2057. }
  2058. drop_check:
  2059. /* Drop unencrypted frames if key is set. */
  2060. if (unlikely(!ieee80211_has_protected(fc) &&
  2061. !ieee80211_is_any_nullfunc(fc) &&
  2062. ieee80211_is_data(fc) && rx->key))
  2063. return -EACCES;
  2064. return 0;
  2065. }
  2066. static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
  2067. {
  2068. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  2069. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  2070. __le16 fc = hdr->frame_control;
  2071. /*
  2072. * Pass through unencrypted frames if the hardware has
  2073. * decrypted them already.
  2074. */
  2075. if (status->flag & RX_FLAG_DECRYPTED)
  2076. return 0;
  2077. if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
  2078. if (unlikely(!ieee80211_has_protected(fc) &&
  2079. ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
  2080. rx->key)) {
  2081. if (ieee80211_is_deauth(fc) ||
  2082. ieee80211_is_disassoc(fc))
  2083. cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
  2084. rx->skb->data,
  2085. rx->skb->len);
  2086. return -EACCES;
  2087. }
  2088. /* BIP does not use Protected field, so need to check MMIE */
  2089. if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
  2090. ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
  2091. if (ieee80211_is_deauth(fc) ||
  2092. ieee80211_is_disassoc(fc))
  2093. cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
  2094. rx->skb->data,
  2095. rx->skb->len);
  2096. return -EACCES;
  2097. }
  2098. if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
  2099. ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
  2100. cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
  2101. rx->skb->data,
  2102. rx->skb->len);
  2103. return -EACCES;
  2104. }
  2105. /*
  2106. * When using MFP, Action frames are not allowed prior to
  2107. * having configured keys.
  2108. */
  2109. if (unlikely(ieee80211_is_action(fc) && !rx->key &&
  2110. ieee80211_is_robust_mgmt_frame(rx->skb)))
  2111. return -EACCES;
  2112. }
  2113. return 0;
  2114. }
  2115. static int
  2116. __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
  2117. {
  2118. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2119. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  2120. bool check_port_control = false;
  2121. struct ethhdr *ehdr;
  2122. int ret;
  2123. *port_control = false;
  2124. if (ieee80211_has_a4(hdr->frame_control) &&
  2125. sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
  2126. return -1;
  2127. if (sdata->vif.type == NL80211_IFTYPE_STATION &&
  2128. !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
  2129. if (!sdata->u.mgd.use_4addr)
  2130. return -1;
  2131. else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
  2132. check_port_control = true;
  2133. }
  2134. if (is_multicast_ether_addr(hdr->addr1) &&
  2135. sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
  2136. return -1;
  2137. ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
  2138. if (ret < 0)
  2139. return ret;
  2140. ehdr = (struct ethhdr *) rx->skb->data;
  2141. if (ehdr->h_proto == rx->sdata->control_port_protocol)
  2142. *port_control = true;
  2143. else if (check_port_control)
  2144. return -1;
  2145. return 0;
  2146. }
  2147. /*
  2148. * requires that rx->skb is a frame with ethernet header
  2149. */
  2150. static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
  2151. {
  2152. static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
  2153. = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
  2154. struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
  2155. /*
  2156. * Allow EAPOL frames to us/the PAE group address regardless of
  2157. * whether the frame was encrypted or not, and always disallow
  2158. * all other destination addresses for them.
  2159. */
  2160. if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
  2161. return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
  2162. ether_addr_equal(ehdr->h_dest, pae_group_addr);
  2163. if (ieee80211_802_1x_port_control(rx) ||
  2164. ieee80211_drop_unencrypted(rx, fc))
  2165. return false;
  2166. return true;
  2167. }
  2168. static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
  2169. struct ieee80211_rx_data *rx)
  2170. {
  2171. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2172. struct net_device *dev = sdata->dev;
  2173. if (unlikely((skb->protocol == sdata->control_port_protocol ||
  2174. (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
  2175. !sdata->control_port_no_preauth)) &&
  2176. sdata->control_port_over_nl80211)) {
  2177. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  2178. bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
  2179. cfg80211_rx_control_port(dev, skb, noencrypt);
  2180. dev_kfree_skb(skb);
  2181. } else {
  2182. struct ethhdr *ehdr = (void *)skb_mac_header(skb);
  2183. memset(skb->cb, 0, sizeof(skb->cb));
  2184. /*
  2185. * 802.1X over 802.11 requires that the authenticator address
  2186. * be used for EAPOL frames. However, 802.1X allows the use of
  2187. * the PAE group address instead. If the interface is part of
  2188. * a bridge and we pass the frame with the PAE group address,
  2189. * then the bridge will forward it to the network (even if the
  2190. * client was not associated yet), which isn't supposed to
  2191. * happen.
  2192. * To avoid that, rewrite the destination address to our own
  2193. * address, so that the authenticator (e.g. hostapd) will see
  2194. * the frame, but bridge won't forward it anywhere else. Note
  2195. * that due to earlier filtering, the only other address can
  2196. * be the PAE group address.
  2197. */
  2198. if (unlikely(skb->protocol == sdata->control_port_protocol &&
  2199. !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
  2200. ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
  2201. /* deliver to local stack */
  2202. if (rx->list)
  2203. list_add_tail(&skb->list, rx->list);
  2204. else
  2205. netif_receive_skb(skb);
  2206. }
  2207. }
  2208. /*
  2209. * requires that rx->skb is a frame with ethernet header
  2210. */
  2211. static void
  2212. ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
  2213. {
  2214. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2215. struct net_device *dev = sdata->dev;
  2216. struct sk_buff *skb, *xmit_skb;
  2217. struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
  2218. struct sta_info *dsta;
  2219. skb = rx->skb;
  2220. xmit_skb = NULL;
  2221. ieee80211_rx_stats(dev, skb->len);
  2222. if (rx->sta) {
  2223. /* The seqno index has the same property as needed
  2224. * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
  2225. * for non-QoS-data frames. Here we know it's a data
  2226. * frame, so count MSDUs.
  2227. */
  2228. u64_stats_update_begin(&rx->sta->rx_stats.syncp);
  2229. rx->sta->rx_stats.msdu[rx->seqno_idx]++;
  2230. u64_stats_update_end(&rx->sta->rx_stats.syncp);
  2231. }
  2232. if ((sdata->vif.type == NL80211_IFTYPE_AP ||
  2233. sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
  2234. !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
  2235. ehdr->h_proto != rx->sdata->control_port_protocol &&
  2236. (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
  2237. if (is_multicast_ether_addr(ehdr->h_dest) &&
  2238. ieee80211_vif_get_num_mcast_if(sdata) != 0) {
  2239. /*
  2240. * send multicast frames both to higher layers in
  2241. * local net stack and back to the wireless medium
  2242. */
  2243. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  2244. if (!xmit_skb)
  2245. net_info_ratelimited("%s: failed to clone multicast frame\n",
  2246. dev->name);
  2247. } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
  2248. !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
  2249. dsta = sta_info_get(sdata, ehdr->h_dest);
  2250. if (dsta) {
  2251. /*
  2252. * The destination station is associated to
  2253. * this AP (in this VLAN), so send the frame
  2254. * directly to it and do not pass it to local
  2255. * net stack.
  2256. */
  2257. xmit_skb = skb;
  2258. skb = NULL;
  2259. }
  2260. }
  2261. }
  2262. #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  2263. if (skb) {
  2264. /* 'align' will only take the values 0 or 2 here since all
  2265. * frames are required to be aligned to 2-byte boundaries
  2266. * when being passed to mac80211; the code here works just
  2267. * as well if that isn't true, but mac80211 assumes it can
  2268. * access fields as 2-byte aligned (e.g. for ether_addr_equal)
  2269. */
  2270. int align;
  2271. align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
  2272. if (align) {
  2273. if (WARN_ON(skb_headroom(skb) < 3)) {
  2274. dev_kfree_skb(skb);
  2275. skb = NULL;
  2276. } else {
  2277. u8 *data = skb->data;
  2278. size_t len = skb_headlen(skb);
  2279. skb->data -= align;
  2280. memmove(skb->data, data, len);
  2281. skb_set_tail_pointer(skb, len);
  2282. }
  2283. }
  2284. }
  2285. #endif
  2286. if (skb) {
  2287. skb->protocol = eth_type_trans(skb, dev);
  2288. ieee80211_deliver_skb_to_local_stack(skb, rx);
  2289. }
  2290. if (xmit_skb) {
  2291. /*
  2292. * Send to wireless media and increase priority by 256 to
  2293. * keep the received priority instead of reclassifying
  2294. * the frame (see cfg80211_classify8021d).
  2295. */
  2296. xmit_skb->priority += 256;
  2297. xmit_skb->protocol = htons(ETH_P_802_3);
  2298. skb_reset_network_header(xmit_skb);
  2299. skb_reset_mac_header(xmit_skb);
  2300. dev_queue_xmit(xmit_skb);
  2301. }
  2302. }
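/*
* In AP and AP_VLAN mode the delivery above can therefore take three
* shapes: a unicast frame addressed to another associated STA on the
* same (V)LAN is bridged straight back to the wireless medium and
* never reaches the local stack, a multicast frame is passed up and,
* when the ieee80211_vif_get_num_mcast_if() check allows it, also
* copied back out over the air, and everything else goes to the local
* network stack only. The IEEE80211_SDATA_DONT_BRIDGE_PACKETS flag
* disables this in-mac80211 bridging entirely.
*/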
  2303. static ieee80211_rx_result debug_noinline
  2304. __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
  2305. {
  2306. struct net_device *dev = rx->sdata->dev;
  2307. struct sk_buff *skb = rx->skb;
  2308. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  2309. __le16 fc = hdr->frame_control;
  2310. struct sk_buff_head frame_list;
  2311. struct ethhdr ethhdr;
  2312. const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
  2313. if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
  2314. check_da = NULL;
  2315. check_sa = NULL;
  2316. } else switch (rx->sdata->vif.type) {
  2317. case NL80211_IFTYPE_AP:
  2318. case NL80211_IFTYPE_AP_VLAN:
  2319. check_da = NULL;
  2320. break;
  2321. case NL80211_IFTYPE_STATION:
  2322. if (!rx->sta ||
  2323. !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
  2324. check_sa = NULL;
  2325. break;
  2326. case NL80211_IFTYPE_MESH_POINT:
  2327. check_sa = NULL;
  2328. break;
  2329. default:
  2330. break;
  2331. }
  2332. skb->dev = dev;
  2333. __skb_queue_head_init(&frame_list);
  2334. if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
  2335. rx->sdata->vif.addr,
  2336. rx->sdata->vif.type,
  2337. data_offset, true))
  2338. return RX_DROP_UNUSABLE;
  2339. ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
  2340. rx->sdata->vif.type,
  2341. rx->local->hw.extra_tx_headroom,
  2342. check_da, check_sa);
  2343. while (!skb_queue_empty(&frame_list)) {
  2344. rx->skb = __skb_dequeue(&frame_list);
  2345. if (!ieee80211_frame_allowed(rx, fc)) {
  2346. dev_kfree_skb(rx->skb);
  2347. continue;
  2348. }
  2349. ieee80211_deliver_skb(rx);
  2350. }
  2351. return RX_QUEUED;
  2352. }
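/*
* The check_da/check_sa pointers above let ieee80211_amsdu_to_8023s()
* reject subframes whose inner DA or SA has to match the outer 802.11
* header for the given interface type: on an AP only the inner SA is
* validated (the DA may be any client), on a non-TDLS station only the
* inner DA is validated, on mesh interfaces the SA check is relaxed,
* and for 4-address frames both checks are skipped.
*/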
  2353. static ieee80211_rx_result debug_noinline
  2354. ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
  2355. {
  2356. struct sk_buff *skb = rx->skb;
  2357. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  2358. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  2359. __le16 fc = hdr->frame_control;
  2360. if (!(status->rx_flags & IEEE80211_RX_AMSDU))
  2361. return RX_CONTINUE;
  2362. if (unlikely(!ieee80211_is_data(fc)))
  2363. return RX_CONTINUE;
  2364. if (unlikely(!ieee80211_is_data_present(fc)))
  2365. return RX_DROP_MONITOR;
  2366. if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
  2367. switch (rx->sdata->vif.type) {
  2368. case NL80211_IFTYPE_AP_VLAN:
  2369. if (!rx->sdata->u.vlan.sta)
  2370. return RX_DROP_UNUSABLE;
  2371. break;
  2372. case NL80211_IFTYPE_STATION:
  2373. if (!rx->sdata->u.mgd.use_4addr)
  2374. return RX_DROP_UNUSABLE;
  2375. break;
  2376. default:
  2377. return RX_DROP_UNUSABLE;
  2378. }
  2379. }
  2380. if (is_multicast_ether_addr(hdr->addr1))
  2381. return RX_DROP_UNUSABLE;
  2382. if (rx->key) {
  2383. /*
  2384. * We should not receive A-MSDUs on pre-HT connections,
  2385. * and HT connections cannot use old ciphers. Thus drop
  2386. * them, as in those cases we couldn't even have SPP
  2387. * A-MSDUs or such.
  2388. */
  2389. switch (rx->key->conf.cipher) {
  2390. case WLAN_CIPHER_SUITE_WEP40:
  2391. case WLAN_CIPHER_SUITE_WEP104:
  2392. case WLAN_CIPHER_SUITE_TKIP:
  2393. return RX_DROP_UNUSABLE;
  2394. default:
  2395. break;
  2396. }
  2397. }
  2398. return __ieee80211_rx_h_amsdu(rx, 0);
  2399. }
  2400. #ifdef CONFIG_MAC80211_MESH
  2401. static ieee80211_rx_result
  2402. ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
  2403. {
  2404. struct ieee80211_hdr *fwd_hdr, *hdr;
  2405. struct ieee80211_tx_info *info;
  2406. struct ieee80211s_hdr *mesh_hdr;
  2407. struct sk_buff *skb = rx->skb, *fwd_skb;
  2408. struct ieee80211_local *local = rx->local;
  2409. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2410. struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
  2411. u16 ac, q, hdrlen;
  2412. int tailroom = 0;
  2413. hdr = (struct ieee80211_hdr *) skb->data;
  2414. hdrlen = ieee80211_hdrlen(hdr->frame_control);
  2415. /* make sure fixed part of mesh header is there, also checks skb len */
  2416. if (!pskb_may_pull(rx->skb, hdrlen + 6))
  2417. return RX_DROP_MONITOR;
  2418. mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
  2419. /* make sure full mesh header is there, also checks skb len */
  2420. if (!pskb_may_pull(rx->skb,
  2421. hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
  2422. return RX_DROP_MONITOR;
  2423. /* reload pointers */
  2424. hdr = (struct ieee80211_hdr *) skb->data;
  2425. mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
  2426. if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
  2427. return RX_DROP_MONITOR;
  2428. /* frame is in RMC, don't forward */
  2429. if (ieee80211_is_data(hdr->frame_control) &&
  2430. is_multicast_ether_addr(hdr->addr1) &&
  2431. mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
  2432. return RX_DROP_MONITOR;
  2433. if (!ieee80211_is_data(hdr->frame_control))
  2434. return RX_CONTINUE;
  2435. if (!mesh_hdr->ttl)
  2436. return RX_DROP_MONITOR;
  2437. if (mesh_hdr->flags & MESH_FLAGS_AE) {
  2438. struct mesh_path *mppath;
  2439. char *proxied_addr;
  2440. char *mpp_addr;
  2441. if (is_multicast_ether_addr(hdr->addr1)) {
  2442. mpp_addr = hdr->addr3;
  2443. proxied_addr = mesh_hdr->eaddr1;
  2444. } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
  2445. MESH_FLAGS_AE_A5_A6) {
  2446. /* has_a4 already checked in ieee80211_rx_mesh_check */
  2447. mpp_addr = hdr->addr4;
  2448. proxied_addr = mesh_hdr->eaddr2;
  2449. } else {
  2450. return RX_DROP_MONITOR;
  2451. }
  2452. rcu_read_lock();
  2453. mppath = mpp_path_lookup(sdata, proxied_addr);
  2454. if (!mppath) {
  2455. mpp_path_add(sdata, proxied_addr, mpp_addr);
  2456. } else {
  2457. spin_lock_bh(&mppath->state_lock);
  2458. if (!ether_addr_equal(mppath->mpp, mpp_addr))
  2459. memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
  2460. mppath->exp_time = jiffies;
  2461. spin_unlock_bh(&mppath->state_lock);
  2462. }
  2463. rcu_read_unlock();
  2464. }
  2465. /* Frame has reached destination. Don't forward */
  2466. if (!is_multicast_ether_addr(hdr->addr1) &&
  2467. ether_addr_equal(sdata->vif.addr, hdr->addr3))
  2468. return RX_CONTINUE;
  2469. ac = ieee802_1d_to_ac[skb->priority];
  2470. q = sdata->vif.hw_queue[ac];
  2471. if (ieee80211_queue_stopped(&local->hw, q)) {
  2472. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
  2473. return RX_DROP_MONITOR;
  2474. }
  2475. skb_set_queue_mapping(skb, ac);
  2476. if (!--mesh_hdr->ttl) {
  2477. if (!is_multicast_ether_addr(hdr->addr1))
  2478. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
  2479. dropped_frames_ttl);
  2480. goto out;
  2481. }
  2482. if (!ifmsh->mshcfg.dot11MeshForwarding)
  2483. goto out;
  2484. if (sdata->crypto_tx_tailroom_needed_cnt)
  2485. tailroom = IEEE80211_ENCRYPT_TAILROOM;
  2486. fwd_skb = skb_copy_expand(skb, local->tx_headroom +
  2487. sdata->encrypt_headroom,
  2488. tailroom, GFP_ATOMIC);
  2489. if (!fwd_skb)
  2490. goto out;
  2491. fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
  2492. fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
  2493. info = IEEE80211_SKB_CB(fwd_skb);
  2494. memset(info, 0, sizeof(*info));
  2495. info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
  2496. info->control.vif = &rx->sdata->vif;
  2497. info->control.jiffies = jiffies;
  2498. if (is_multicast_ether_addr(fwd_hdr->addr1)) {
  2499. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
  2500. memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
  2501. /* update power mode indication when forwarding */
  2502. ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
  2503. } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
  2504. /* mesh power mode flags updated in mesh_nexthop_lookup */
  2505. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
  2506. } else {
  2507. /* unable to resolve next hop */
  2508. mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
  2509. fwd_hdr->addr3, 0,
  2510. WLAN_REASON_MESH_PATH_NOFORWARD,
  2511. fwd_hdr->addr2);
  2512. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
  2513. kfree_skb(fwd_skb);
  2514. return RX_DROP_MONITOR;
  2515. }
  2516. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
  2517. ieee80211_add_pending_skb(local, fwd_skb);
  2518. out:
  2519. if (is_multicast_ether_addr(hdr->addr1))
  2520. return RX_CONTINUE;
  2521. return RX_DROP_MONITOR;
  2522. }
  2523. #endif
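/*
 * Normal data handler: report unexpected 4-address frames on AP
 * interfaces to userspace, convert the 802.11 header to 802.3, divert
 * TDLS channel-switch frames to the TDLS worker, re-target port_control
 * frames received on an AP_VLAN at the underlying AP interface, rearm
 * the dynamic powersave timer where applicable and deliver the frame to
 * the networking stack.
 */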
  2524. static ieee80211_rx_result debug_noinline
  2525. ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
  2526. {
  2527. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2528. struct ieee80211_local *local = rx->local;
  2529. struct net_device *dev = sdata->dev;
  2530. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  2531. __le16 fc = hdr->frame_control;
  2532. bool port_control;
  2533. int err;
  2534. if (unlikely(!ieee80211_is_data(hdr->frame_control)))
  2535. return RX_CONTINUE;
  2536. if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
  2537. return RX_DROP_MONITOR;
  2538. /*
  2539. * Send unexpected-4addr-frame event to hostapd. For older versions,
  2540. * also drop the frame to cooked monitor interfaces.
  2541. */
  2542. if (ieee80211_has_a4(hdr->frame_control) &&
  2543. sdata->vif.type == NL80211_IFTYPE_AP) {
  2544. if (rx->sta &&
  2545. !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
  2546. cfg80211_rx_unexpected_4addr_frame(
  2547. rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
  2548. return RX_DROP_MONITOR;
  2549. }
  2550. err = __ieee80211_data_to_8023(rx, &port_control);
  2551. if (unlikely(err))
  2552. return RX_DROP_UNUSABLE;
  2553. if (!ieee80211_frame_allowed(rx, fc))
  2554. return RX_DROP_MONITOR;
  2555. /* directly handle TDLS channel switch requests/responses */
  2556. if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
  2557. cpu_to_be16(ETH_P_TDLS))) {
  2558. struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
  2559. if (pskb_may_pull(rx->skb,
  2560. offsetof(struct ieee80211_tdls_data, u)) &&
  2561. tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
  2562. tf->category == WLAN_CATEGORY_TDLS &&
  2563. (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
  2564. tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
  2565. skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
  2566. schedule_work(&local->tdls_chsw_work);
  2567. if (rx->sta)
  2568. rx->sta->rx_stats.packets++;
  2569. return RX_QUEUED;
  2570. }
  2571. }
  2572. if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
  2573. unlikely(port_control) && sdata->bss) {
  2574. sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
  2575. u.ap);
  2576. dev = sdata->dev;
  2577. rx->sdata = sdata;
  2578. }
  2579. rx->skb->dev = dev;
  2580. if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
  2581. local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
  2582. !is_multicast_ether_addr(
  2583. ((struct ethhdr *)rx->skb->data)->h_dest) &&
  2584. (!local->scanning &&
  2585. !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
  2586. mod_timer(&local->dynamic_ps_timer, jiffies +
  2587. msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
  2588. ieee80211_deliver_skb(rx);
  2589. return RX_QUEUED;
  2590. }
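/*
 * Control frame handler: the only control frame processed here is the
 * Block Ack Request.  If the peer uses an aggregation session we never
 * set up, send a DELBA; otherwise release the buffered frames up to the
 * BAR's starting sequence number, rearm the session timer and notify
 * the driver with a BAR_RX_EVENT.  All other control frames go to
 * cooked monitor.
 */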
  2591. static ieee80211_rx_result debug_noinline
  2592. ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
  2593. {
  2594. struct sk_buff *skb = rx->skb;
  2595. struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
  2596. struct tid_ampdu_rx *tid_agg_rx;
  2597. u16 start_seq_num;
  2598. u16 tid;
  2599. if (likely(!ieee80211_is_ctl(bar->frame_control)))
  2600. return RX_CONTINUE;
  2601. if (ieee80211_is_back_req(bar->frame_control)) {
  2602. struct {
  2603. __le16 control, start_seq_num;
  2604. } __packed bar_data;
  2605. struct ieee80211_event event = {
  2606. .type = BAR_RX_EVENT,
  2607. };
  2608. if (!rx->sta)
  2609. return RX_DROP_MONITOR;
  2610. if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
  2611. &bar_data, sizeof(bar_data)))
  2612. return RX_DROP_MONITOR;
  2613. tid = le16_to_cpu(bar_data.control) >> 12;
  2614. if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
  2615. !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
  2616. ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
  2617. WLAN_BACK_RECIPIENT,
  2618. WLAN_REASON_QSTA_REQUIRE_SETUP);
  2619. tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
  2620. if (!tid_agg_rx)
  2621. return RX_DROP_MONITOR;
  2622. start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
  2623. event.u.ba.tid = tid;
  2624. event.u.ba.ssn = start_seq_num;
  2625. event.u.ba.sta = &rx->sta->sta;
  2626. /* reset session timer */
  2627. if (tid_agg_rx->timeout)
  2628. mod_timer(&tid_agg_rx->session_timer,
  2629. TU_TO_EXP_TIME(tid_agg_rx->timeout));
  2630. spin_lock(&tid_agg_rx->reorder_lock);
  2631. /* release stored frames up to start of BAR */
  2632. ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
  2633. start_seq_num, frames);
  2634. spin_unlock(&tid_agg_rx->reorder_lock);
  2635. drv_event_callback(rx->local, rx->sdata, &event);
  2636. kfree_skb(skb);
  2637. return RX_QUEUED;
  2638. }
  2639. /*
  2640. * After this point, we only want management frames,
  2641. * so we can drop all remaining control frames to
  2642. * cooked monitor interfaces.
  2643. */
  2644. return RX_DROP_MONITOR;
  2645. }
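/*
 * Answer an SA Query request from the AP we are associated with: check
 * that the frame is addressed to us, comes from the current BSSID and
 * is long enough, then build an SA Query response echoing the
 * transaction ID and transmit it.
 */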
  2646. static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
  2647. struct ieee80211_mgmt *mgmt,
  2648. size_t len)
  2649. {
  2650. struct ieee80211_local *local = sdata->local;
  2651. struct sk_buff *skb;
  2652. struct ieee80211_mgmt *resp;
  2653. if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
  2654. /* Not to own unicast address */
  2655. return;
  2656. }
  2657. if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
  2658. !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
  2659. /* Not from the current AP or not associated yet. */
  2660. return;
  2661. }
  2662. if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
  2663. /* Too short SA Query request frame */
  2664. return;
  2665. }
  2666. skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
  2667. if (skb == NULL)
  2668. return;
  2669. skb_reserve(skb, local->hw.extra_tx_headroom);
  2670. resp = skb_put_zero(skb, 24);
  2671. memcpy(resp->da, mgmt->sa, ETH_ALEN);
  2672. memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
  2673. memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
  2674. resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
  2675. IEEE80211_STYPE_ACTION);
  2676. skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
  2677. resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
  2678. resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
  2679. memcpy(resp->u.action.u.sa_query.trans_id,
  2680. mgmt->u.action.u.sa_query.trans_id,
  2681. WLAN_SA_QUERY_TR_ID_LEN);
  2682. ieee80211_tx_skb(sdata, skb);
  2683. }
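/*
 * Generic management frame checks: let S1G beacons through untouched,
 * require at least a 24-byte header and a management frame type, report
 * received beacons to userspace (OBSS beacon reporting) once per frame
 * on AP interfaces, and drop robust management frames that should have
 * been protected but were not.
 */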
  2684. static ieee80211_rx_result debug_noinline
  2685. ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
  2686. {
  2687. struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
  2688. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  2689. if (ieee80211_is_s1g_beacon(mgmt->frame_control))
  2690. return RX_CONTINUE;
  2691. /*
  2692. * From here on, look only at management frames.
  2693. * Data and control frames are already handled,
  2694. * and unknown (reserved) frames are useless.
  2695. */
  2696. if (rx->skb->len < 24)
  2697. return RX_DROP_MONITOR;
  2698. if (!ieee80211_is_mgmt(mgmt->frame_control))
  2699. return RX_DROP_MONITOR;
  2700. if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
  2701. ieee80211_is_beacon(mgmt->frame_control) &&
  2702. !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
  2703. int sig = 0;
  2704. if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
  2705. !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
  2706. sig = status->signal;
  2707. cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
  2708. rx->skb->data, rx->skb->len,
  2709. ieee80211_rx_status_to_khz(status),
  2710. sig);
  2711. rx->flags |= IEEE80211_RX_BEACON_REPORTED;
  2712. }
  2713. if (ieee80211_drop_unencrypted_mgmt(rx))
  2714. return RX_DROP_UNUSABLE;
  2715. return RX_CONTINUE;
  2716. }
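/*
 * Action frame handler.  Each category is length-checked against the
 * fields it needs; frames are then either handled right here (HT SMPS
 * and channel-width notifications, spectrum measurement requests) or
 * queued to sdata->skb_queue for the interface work (block-ack setup
 * and teardown, VHT operating mode, channel switch announcements, mesh
 * peering and mesh path selection).  Malformed action frames are only
 * flagged here; later handlers skip userspace delivery for them and
 * bounce them back to the sender.
 */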
  2717. static ieee80211_rx_result debug_noinline
  2718. ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
  2719. {
  2720. struct ieee80211_local *local = rx->local;
  2721. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2722. struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
  2723. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  2724. int len = rx->skb->len;
  2725. if (!ieee80211_is_action(mgmt->frame_control))
  2726. return RX_CONTINUE;
  2727. /* drop too small frames */
  2728. if (len < IEEE80211_MIN_ACTION_SIZE)
  2729. return RX_DROP_UNUSABLE;
  2730. if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
  2731. mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
  2732. mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
  2733. return RX_DROP_UNUSABLE;
  2734. switch (mgmt->u.action.category) {
  2735. case WLAN_CATEGORY_HT:
  2736. /* reject HT action frames from stations not supporting HT */
  2737. if (!rx->sta->sta.ht_cap.ht_supported)
  2738. goto invalid;
  2739. if (sdata->vif.type != NL80211_IFTYPE_STATION &&
  2740. sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
  2741. sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
  2742. sdata->vif.type != NL80211_IFTYPE_AP &&
  2743. sdata->vif.type != NL80211_IFTYPE_ADHOC)
  2744. break;
  2745. /* verify action & smps_control/chanwidth are present */
  2746. if (len < IEEE80211_MIN_ACTION_SIZE + 2)
  2747. goto invalid;
  2748. switch (mgmt->u.action.u.ht_smps.action) {
  2749. case WLAN_HT_ACTION_SMPS: {
  2750. struct ieee80211_supported_band *sband;
  2751. enum ieee80211_smps_mode smps_mode;
  2752. struct sta_opmode_info sta_opmode = {};
  2753. if (sdata->vif.type != NL80211_IFTYPE_AP &&
  2754. sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
  2755. goto handled;
  2756. /* convert to HT capability */
  2757. switch (mgmt->u.action.u.ht_smps.smps_control) {
  2758. case WLAN_HT_SMPS_CONTROL_DISABLED:
  2759. smps_mode = IEEE80211_SMPS_OFF;
  2760. break;
  2761. case WLAN_HT_SMPS_CONTROL_STATIC:
  2762. smps_mode = IEEE80211_SMPS_STATIC;
  2763. break;
  2764. case WLAN_HT_SMPS_CONTROL_DYNAMIC:
  2765. smps_mode = IEEE80211_SMPS_DYNAMIC;
  2766. break;
  2767. default:
  2768. goto invalid;
  2769. }
  2770. /* if no change do nothing */
  2771. if (rx->sta->sta.smps_mode == smps_mode)
  2772. goto handled;
  2773. rx->sta->sta.smps_mode = smps_mode;
  2774. sta_opmode.smps_mode =
  2775. ieee80211_smps_mode_to_smps_mode(smps_mode);
  2776. sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
  2777. sband = rx->local->hw.wiphy->bands[status->band];
  2778. rate_control_rate_update(local, sband, rx->sta,
  2779. IEEE80211_RC_SMPS_CHANGED);
  2780. cfg80211_sta_opmode_change_notify(sdata->dev,
  2781. rx->sta->addr,
  2782. &sta_opmode,
  2783. GFP_ATOMIC);
  2784. goto handled;
  2785. }
  2786. case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
  2787. struct ieee80211_supported_band *sband;
  2788. u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
  2789. enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
  2790. struct sta_opmode_info sta_opmode = {};
  2791. /* If it doesn't support 40 MHz it can't change ... */
  2792. if (!(rx->sta->sta.ht_cap.cap &
  2793. IEEE80211_HT_CAP_SUP_WIDTH_20_40))
  2794. goto handled;
  2795. if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
  2796. max_bw = IEEE80211_STA_RX_BW_20;
  2797. else
  2798. max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
  2799. /* set cur_max_bandwidth and recalc sta bw */
  2800. rx->sta->cur_max_bandwidth = max_bw;
  2801. new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
  2802. if (rx->sta->sta.bandwidth == new_bw)
  2803. goto handled;
  2804. rx->sta->sta.bandwidth = new_bw;
  2805. sband = rx->local->hw.wiphy->bands[status->band];
  2806. sta_opmode.bw =
  2807. ieee80211_sta_rx_bw_to_chan_width(rx->sta);
  2808. sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
  2809. rate_control_rate_update(local, sband, rx->sta,
  2810. IEEE80211_RC_BW_CHANGED);
  2811. cfg80211_sta_opmode_change_notify(sdata->dev,
  2812. rx->sta->addr,
  2813. &sta_opmode,
  2814. GFP_ATOMIC);
  2815. goto handled;
  2816. }
  2817. default:
  2818. goto invalid;
  2819. }
  2820. break;
  2821. case WLAN_CATEGORY_PUBLIC:
  2822. if (len < IEEE80211_MIN_ACTION_SIZE + 1)
  2823. goto invalid;
  2824. if (sdata->vif.type != NL80211_IFTYPE_STATION)
  2825. break;
  2826. if (!rx->sta)
  2827. break;
  2828. if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
  2829. break;
  2830. if (mgmt->u.action.u.ext_chan_switch.action_code !=
  2831. WLAN_PUB_ACTION_EXT_CHANSW_ANN)
  2832. break;
  2833. if (len < offsetof(struct ieee80211_mgmt,
  2834. u.action.u.ext_chan_switch.variable))
  2835. goto invalid;
  2836. goto queue;
  2837. case WLAN_CATEGORY_VHT:
  2838. if (sdata->vif.type != NL80211_IFTYPE_STATION &&
  2839. sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
  2840. sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
  2841. sdata->vif.type != NL80211_IFTYPE_AP &&
  2842. sdata->vif.type != NL80211_IFTYPE_ADHOC)
  2843. break;
  2844. /* verify action code is present */
  2845. if (len < IEEE80211_MIN_ACTION_SIZE + 1)
  2846. goto invalid;
  2847. switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
  2848. case WLAN_VHT_ACTION_OPMODE_NOTIF: {
  2849. /* verify opmode is present */
  2850. if (len < IEEE80211_MIN_ACTION_SIZE + 2)
  2851. goto invalid;
  2852. goto queue;
  2853. }
  2854. case WLAN_VHT_ACTION_GROUPID_MGMT: {
  2855. if (len < IEEE80211_MIN_ACTION_SIZE + 25)
  2856. goto invalid;
  2857. goto queue;
  2858. }
  2859. default:
  2860. break;
  2861. }
  2862. break;
  2863. case WLAN_CATEGORY_BACK:
  2864. if (sdata->vif.type != NL80211_IFTYPE_STATION &&
  2865. sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
  2866. sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
  2867. sdata->vif.type != NL80211_IFTYPE_AP &&
  2868. sdata->vif.type != NL80211_IFTYPE_ADHOC)
  2869. break;
  2870. /* verify action_code is present */
  2871. if (len < IEEE80211_MIN_ACTION_SIZE + 1)
  2872. break;
  2873. switch (mgmt->u.action.u.addba_req.action_code) {
  2874. case WLAN_ACTION_ADDBA_REQ:
  2875. if (len < (IEEE80211_MIN_ACTION_SIZE +
  2876. sizeof(mgmt->u.action.u.addba_req)))
  2877. goto invalid;
  2878. break;
  2879. case WLAN_ACTION_ADDBA_RESP:
  2880. if (len < (IEEE80211_MIN_ACTION_SIZE +
  2881. sizeof(mgmt->u.action.u.addba_resp)))
  2882. goto invalid;
  2883. break;
  2884. case WLAN_ACTION_DELBA:
  2885. if (len < (IEEE80211_MIN_ACTION_SIZE +
  2886. sizeof(mgmt->u.action.u.delba)))
  2887. goto invalid;
  2888. break;
  2889. default:
  2890. goto invalid;
  2891. }
  2892. goto queue;
  2893. case WLAN_CATEGORY_SPECTRUM_MGMT:
  2894. /* verify action_code is present */
  2895. if (len < IEEE80211_MIN_ACTION_SIZE + 1)
  2896. break;
  2897. switch (mgmt->u.action.u.measurement.action_code) {
  2898. case WLAN_ACTION_SPCT_MSR_REQ:
  2899. if (status->band != NL80211_BAND_5GHZ)
  2900. break;
  2901. if (len < (IEEE80211_MIN_ACTION_SIZE +
  2902. sizeof(mgmt->u.action.u.measurement)))
  2903. break;
  2904. if (sdata->vif.type != NL80211_IFTYPE_STATION)
  2905. break;
  2906. ieee80211_process_measurement_req(sdata, mgmt, len);
  2907. goto handled;
  2908. case WLAN_ACTION_SPCT_CHL_SWITCH: {
  2909. u8 *bssid;
  2910. if (len < (IEEE80211_MIN_ACTION_SIZE +
  2911. sizeof(mgmt->u.action.u.chan_switch)))
  2912. break;
  2913. if (sdata->vif.type != NL80211_IFTYPE_STATION &&
  2914. sdata->vif.type != NL80211_IFTYPE_ADHOC &&
  2915. sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
  2916. break;
  2917. if (sdata->vif.type == NL80211_IFTYPE_STATION)
  2918. bssid = sdata->u.mgd.bssid;
  2919. else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
  2920. bssid = sdata->u.ibss.bssid;
  2921. else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
  2922. bssid = mgmt->sa;
  2923. else
  2924. break;
  2925. if (!ether_addr_equal(mgmt->bssid, bssid))
  2926. break;
  2927. goto queue;
  2928. }
  2929. }
  2930. break;
  2931. case WLAN_CATEGORY_SELF_PROTECTED:
  2932. if (len < (IEEE80211_MIN_ACTION_SIZE +
  2933. sizeof(mgmt->u.action.u.self_prot.action_code)))
  2934. break;
  2935. switch (mgmt->u.action.u.self_prot.action_code) {
  2936. case WLAN_SP_MESH_PEERING_OPEN:
  2937. case WLAN_SP_MESH_PEERING_CLOSE:
  2938. case WLAN_SP_MESH_PEERING_CONFIRM:
  2939. if (!ieee80211_vif_is_mesh(&sdata->vif))
  2940. goto invalid;
  2941. if (sdata->u.mesh.user_mpm)
  2942. /* userspace handles this frame */
  2943. break;
  2944. goto queue;
  2945. case WLAN_SP_MGK_INFORM:
  2946. case WLAN_SP_MGK_ACK:
  2947. if (!ieee80211_vif_is_mesh(&sdata->vif))
  2948. goto invalid;
  2949. break;
  2950. }
  2951. break;
  2952. case WLAN_CATEGORY_MESH_ACTION:
  2953. if (len < (IEEE80211_MIN_ACTION_SIZE +
  2954. sizeof(mgmt->u.action.u.mesh_action.action_code)))
  2955. break;
  2956. if (!ieee80211_vif_is_mesh(&sdata->vif))
  2957. break;
  2958. if (mesh_action_is_path_sel(mgmt) &&
  2959. !mesh_path_sel_is_hwmp(sdata))
  2960. break;
  2961. goto queue;
  2962. }
  2963. return RX_CONTINUE;
  2964. invalid:
  2965. status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
  2966. /* will return in the next handlers */
  2967. return RX_CONTINUE;
  2968. handled:
  2969. if (rx->sta)
  2970. rx->sta->rx_stats.packets++;
  2971. dev_kfree_skb(rx->skb);
  2972. return RX_QUEUED;
  2973. queue:
  2974. skb_queue_tail(&sdata->skb_queue, rx->skb);
  2975. ieee80211_queue_work(&local->hw, &sdata->work);
  2976. if (rx->sta)
  2977. rx->sta->rx_stats.packets++;
  2978. return RX_QUEUED;
  2979. }
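/*
 * Offer management frames the kernel did not consume to userspace via
 * cfg80211's management frame registrations; if a listener matched, the
 * frame is consumed here, otherwise it continues through the remaining
 * handlers.  Frames already marked as malformed action frames are
 * skipped so they can be returned to the sender instead.
 */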
  2980. static ieee80211_rx_result debug_noinline
  2981. ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
  2982. {
  2983. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  2984. int sig = 0;
  2985. /* skip known-bad action frames and return them in the next handler */
  2986. if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
  2987. return RX_CONTINUE;
  2988. /*
  2989. * Getting here means the kernel doesn't know how to handle
  2990. * it, but maybe userspace does ... include returned frames
  2991. * so userspace can register for those to know whether ones
  2992. * it transmitted were processed or returned.
  2993. */
  2994. if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
  2995. !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
  2996. sig = status->signal;
  2997. if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev,
  2998. ieee80211_rx_status_to_khz(status), sig,
  2999. rx->skb->data, rx->skb->len, 0)) {
  3000. if (rx->sta)
  3001. rx->sta->rx_stats.packets++;
  3002. dev_kfree_skb(rx->skb);
  3003. return RX_QUEUED;
  3004. }
  3005. return RX_CONTINUE;
  3006. }
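/*
 * Action frames userspace had a chance to see but did not claim:
 * currently this only answers SA Query requests in station mode.
 */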
  3007. static ieee80211_rx_result debug_noinline
  3008. ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
  3009. {
  3010. struct ieee80211_sub_if_data *sdata = rx->sdata;
  3011. struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
  3012. int len = rx->skb->len;
  3013. if (!ieee80211_is_action(mgmt->frame_control))
  3014. return RX_CONTINUE;
  3015. switch (mgmt->u.action.category) {
  3016. case WLAN_CATEGORY_SA_QUERY:
  3017. if (len < (IEEE80211_MIN_ACTION_SIZE +
  3018. sizeof(mgmt->u.action.u.sa_query)))
  3019. break;
  3020. switch (mgmt->u.action.u.sa_query.action) {
  3021. case WLAN_ACTION_SA_QUERY_REQUEST:
  3022. if (sdata->vif.type != NL80211_IFTYPE_STATION)
  3023. break;
  3024. ieee80211_process_sa_query_req(sdata, mgmt, len);
  3025. goto handled;
  3026. }
  3027. break;
  3028. }
  3029. return RX_CONTINUE;
  3030. handled:
  3031. if (rx->sta)
  3032. rx->sta->rx_stats.packets++;
  3033. dev_kfree_skb(rx->skb);
  3034. return RX_QUEUED;
  3035. }
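/*
 * Return unhandled action frames to their sender with the "rejected"
 * bit (0x80) set in the category field, as the spec requires.  AP
 * interfaces leave this to hostapd, and multicast or already-rejected
 * frames are never bounced.  On P2P device interfaces the reply may
 * have to go out off-channel, hence the extra TX flags.
 */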
  3036. static ieee80211_rx_result debug_noinline
  3037. ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
  3038. {
  3039. struct ieee80211_local *local = rx->local;
  3040. struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
  3041. struct sk_buff *nskb;
  3042. struct ieee80211_sub_if_data *sdata = rx->sdata;
  3043. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  3044. if (!ieee80211_is_action(mgmt->frame_control))
  3045. return RX_CONTINUE;
  3046. /*
  3047. * For AP mode, hostapd is responsible for handling any action
  3048. * frames that we didn't handle, including returning unknown
  3049. * ones. For all other modes we will return them to the sender,
  3050. * setting the 0x80 bit in the action category, as required by
  3051. * 802.11-2012 9.24.4.
  3052. * Newer versions of hostapd shall also use the management frame
  3053. * registration mechanisms, but older ones still use cooked
  3054. * monitor interfaces so push all frames there.
  3055. */
  3056. if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
  3057. (sdata->vif.type == NL80211_IFTYPE_AP ||
  3058. sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
  3059. return RX_DROP_MONITOR;
  3060. if (is_multicast_ether_addr(mgmt->da))
  3061. return RX_DROP_MONITOR;
  3062. /* do not return rejected action frames */
  3063. if (mgmt->u.action.category & 0x80)
  3064. return RX_DROP_UNUSABLE;
  3065. nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
  3066. GFP_ATOMIC);
  3067. if (nskb) {
  3068. struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
  3069. nmgmt->u.action.category |= 0x80;
  3070. memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
  3071. memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
  3072. memset(nskb->cb, 0, sizeof(nskb->cb));
  3073. if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
  3074. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
  3075. info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
  3076. IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
  3077. IEEE80211_TX_CTL_NO_CCK_RATE;
  3078. if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
  3079. info->hw_queue =
  3080. local->hw.offchannel_tx_hw_queue;
  3081. }
  3082. __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
  3083. status->band);
  3084. }
  3085. dev_kfree_skb(rx->skb);
  3086. return RX_QUEUED;
  3087. }
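/*
 * Extension frames (currently only S1G beacons) are queued to the
 * interface work on station interfaces and dropped everywhere else.
 */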
  3088. static ieee80211_rx_result debug_noinline
  3089. ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
  3090. {
  3091. struct ieee80211_sub_if_data *sdata = rx->sdata;
  3092. struct ieee80211_hdr *hdr = (void *)rx->skb->data;
  3093. if (!ieee80211_is_ext(hdr->frame_control))
  3094. return RX_CONTINUE;
  3095. if (sdata->vif.type != NL80211_IFTYPE_STATION)
  3096. return RX_DROP_MONITOR;
  3097. /* for now only beacons are ext, so queue them */
  3098. skb_queue_tail(&sdata->skb_queue, rx->skb);
  3099. ieee80211_queue_work(&rx->local->hw, &sdata->work);
  3100. if (rx->sta)
  3101. rx->sta->rx_stats.packets++;
  3102. return RX_QUEUED;
  3103. }
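/*
 * Last management handler: filter the remaining subtypes by interface
 * type (e.g. deauth only for station/IBSS, probe requests only for
 * IBSS/mesh), refuse multicast but non-broadcast destinations where
 * they make no sense, and queue everything accepted to the interface
 * work.
 */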
  3104. static ieee80211_rx_result debug_noinline
  3105. ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
  3106. {
  3107. struct ieee80211_sub_if_data *sdata = rx->sdata;
  3108. struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
  3109. __le16 stype;
  3110. stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
  3111. if (!ieee80211_vif_is_mesh(&sdata->vif) &&
  3112. sdata->vif.type != NL80211_IFTYPE_ADHOC &&
  3113. sdata->vif.type != NL80211_IFTYPE_OCB &&
  3114. sdata->vif.type != NL80211_IFTYPE_STATION)
  3115. return RX_DROP_MONITOR;
  3116. switch (stype) {
  3117. case cpu_to_le16(IEEE80211_STYPE_AUTH):
  3118. case cpu_to_le16(IEEE80211_STYPE_BEACON):
  3119. case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
  3120. /* process for all: mesh, mlme, ibss */
  3121. break;
  3122. case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
  3123. if (is_multicast_ether_addr(mgmt->da) &&
  3124. !is_broadcast_ether_addr(mgmt->da))
  3125. return RX_DROP_MONITOR;
  3126. /* process only for station/IBSS */
  3127. if (sdata->vif.type != NL80211_IFTYPE_STATION &&
  3128. sdata->vif.type != NL80211_IFTYPE_ADHOC)
  3129. return RX_DROP_MONITOR;
  3130. break;
  3131. case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
  3132. case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
  3133. case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
  3134. if (is_multicast_ether_addr(mgmt->da) &&
  3135. !is_broadcast_ether_addr(mgmt->da))
  3136. return RX_DROP_MONITOR;
  3137. /* process only for station */
  3138. if (sdata->vif.type != NL80211_IFTYPE_STATION)
  3139. return RX_DROP_MONITOR;
  3140. break;
  3141. case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
  3142. /* process only for ibss and mesh */
  3143. if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
  3144. sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
  3145. return RX_DROP_MONITOR;
  3146. break;
  3147. default:
  3148. return RX_DROP_MONITOR;
  3149. }
  3150. /* queue up frame and kick off work to process it */
  3151. skb_queue_tail(&sdata->skb_queue, rx->skb);
  3152. ieee80211_queue_work(&rx->local->hw, &sdata->work);
  3153. if (rx->sta)
  3154. rx->sta->rx_stats.packets++;
  3155. return RX_QUEUED;
  3156. }
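/*
 * Deliver a frame that fell out of the RX path to any "cooked" monitor
 * interfaces: done at most once per frame, with a radiotap header
 * prepended and the skb cloned for every monitor interface except the
 * last one.
 */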
  3157. static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
  3158. struct ieee80211_rate *rate)
  3159. {
  3160. struct ieee80211_sub_if_data *sdata;
  3161. struct ieee80211_local *local = rx->local;
  3162. struct sk_buff *skb = rx->skb, *skb2;
  3163. struct net_device *prev_dev = NULL;
  3164. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  3165. int needed_headroom;
  3166. /*
  3167. * If cooked monitor has been processed already, then
  3168. * don't do it again. If not, set the flag.
  3169. */
  3170. if (rx->flags & IEEE80211_RX_CMNTR)
  3171. goto out_free_skb;
  3172. rx->flags |= IEEE80211_RX_CMNTR;
  3173. /* If there are no cooked monitor interfaces, just free the SKB */
  3174. if (!local->cooked_mntrs)
  3175. goto out_free_skb;
  3176. /* vendor data is long removed here */
  3177. status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
  3178. /* room for the radiotap header based on driver features */
  3179. needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
  3180. if (skb_headroom(skb) < needed_headroom &&
  3181. pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
  3182. goto out_free_skb;
  3183. /* prepend radiotap information */
  3184. ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
  3185. false);
  3186. skb_reset_mac_header(skb);
  3187. skb->ip_summed = CHECKSUM_UNNECESSARY;
  3188. skb->pkt_type = PACKET_OTHERHOST;
  3189. skb->protocol = htons(ETH_P_802_2);
  3190. list_for_each_entry_rcu(sdata, &local->interfaces, list) {
  3191. if (!ieee80211_sdata_running(sdata))
  3192. continue;
  3193. if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
  3194. !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
  3195. continue;
  3196. if (prev_dev) {
  3197. skb2 = skb_clone(skb, GFP_ATOMIC);
  3198. if (skb2) {
  3199. skb2->dev = prev_dev;
  3200. netif_receive_skb(skb2);
  3201. }
  3202. }
  3203. prev_dev = sdata->dev;
  3204. ieee80211_rx_stats(sdata->dev, skb->len);
  3205. }
  3206. if (prev_dev) {
  3207. skb->dev = prev_dev;
  3208. netif_receive_skb(skb);
  3209. return;
  3210. }
  3211. out_free_skb:
  3212. dev_kfree_skb(skb);
  3213. }
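/*
 * Translate a handler verdict into an action: RX_DROP_MONITOR bumps the
 * drop counters and, like RX_CONTINUE, ends up in cooked monitor
 * delivery; RX_DROP_UNUSABLE frees the frame outright; RX_QUEUED means
 * a handler consumed or re-queued the frame and only statistics are
 * updated.
 */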
  3214. static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
  3215. ieee80211_rx_result res)
  3216. {
  3217. switch (res) {
  3218. case RX_DROP_MONITOR:
  3219. I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
  3220. if (rx->sta)
  3221. rx->sta->rx_stats.dropped++;
  3222. fallthrough;
  3223. case RX_CONTINUE: {
  3224. struct ieee80211_rate *rate = NULL;
  3225. struct ieee80211_supported_band *sband;
  3226. struct ieee80211_rx_status *status;
  3227. status = IEEE80211_SKB_RXCB((rx->skb));
  3228. sband = rx->local->hw.wiphy->bands[status->band];
  3229. if (status->encoding == RX_ENC_LEGACY)
  3230. rate = &sband->bitrates[status->rate_idx];
  3231. ieee80211_rx_cooked_monitor(rx, rate);
  3232. break;
  3233. }
  3234. case RX_DROP_UNUSABLE:
  3235. I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
  3236. if (rx->sta)
  3237. rx->sta->rx_stats.dropped++;
  3238. dev_kfree_skb(rx->skb);
  3239. break;
  3240. case RX_QUEUED:
  3241. I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
  3242. break;
  3243. }
  3244. }
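/*
 * Run the per-MPDU handler chain over every frame released from the
 * reorder buffer; rx_path_lock serialises this with releases triggered
 * by the reorder timer so key and station state is never used
 * concurrently.
 */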
  3245. static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
  3246. struct sk_buff_head *frames)
  3247. {
  3248. ieee80211_rx_result res = RX_DROP_MONITOR;
  3249. struct sk_buff *skb;
  3250. #define CALL_RXH(rxh) \
  3251. do { \
  3252. res = rxh(rx); \
  3253. if (res != RX_CONTINUE) \
  3254. goto rxh_next; \
  3255. } while (0)
  3256. /* Lock here to avoid hitting all of the data used in the RX
  3257. * path (e.g. key data, station data, ...) concurrently when
  3258. * a frame is released from the reorder buffer due to timeout
  3259. * from the timer, potentially concurrently with RX from the
  3260. * driver.
  3261. */
  3262. spin_lock_bh(&rx->local->rx_path_lock);
  3263. while ((skb = __skb_dequeue(frames))) {
  3264. /*
  3265. * all the other fields are valid across frames
  3266. * that belong to an aMPDU since they are on the
  3267. * same TID from the same station
  3268. */
  3269. rx->skb = skb;
  3270. CALL_RXH(ieee80211_rx_h_check_more_data);
  3271. CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
  3272. CALL_RXH(ieee80211_rx_h_sta_process);
  3273. CALL_RXH(ieee80211_rx_h_decrypt);
  3274. CALL_RXH(ieee80211_rx_h_defragment);
  3275. CALL_RXH(ieee80211_rx_h_michael_mic_verify);
  3276. /* must be after MMIC verify so header is counted in MPDU mic */
  3277. #ifdef CONFIG_MAC80211_MESH
  3278. if (ieee80211_vif_is_mesh(&rx->sdata->vif))
  3279. CALL_RXH(ieee80211_rx_h_mesh_fwding);
  3280. #endif
  3281. CALL_RXH(ieee80211_rx_h_amsdu);
  3282. CALL_RXH(ieee80211_rx_h_data);
  3283. /* special treatment -- needs the queue */
  3284. res = ieee80211_rx_h_ctrl(rx, frames);
  3285. if (res != RX_CONTINUE)
  3286. goto rxh_next;
  3287. CALL_RXH(ieee80211_rx_h_mgmt_check);
  3288. CALL_RXH(ieee80211_rx_h_action);
  3289. CALL_RXH(ieee80211_rx_h_userspace_mgmt);
  3290. CALL_RXH(ieee80211_rx_h_action_post_userspace);
  3291. CALL_RXH(ieee80211_rx_h_action_return);
  3292. CALL_RXH(ieee80211_rx_h_ext);
  3293. CALL_RXH(ieee80211_rx_h_mgmt);
  3294. rxh_next:
  3295. ieee80211_rx_handlers_result(rx, res);
  3296. #undef CALL_RXH
  3297. }
  3298. spin_unlock_bh(&rx->local->rx_path_lock);
  3299. }
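/*
 * Per-frame entry into the handler machinery: duplicate detection and
 * basic acceptance checks first, then A-MPDU reordering, and finally
 * the full handler chain on whatever the reorder buffer released.
 */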
  3300. static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
  3301. {
  3302. struct sk_buff_head reorder_release;
  3303. ieee80211_rx_result res = RX_DROP_MONITOR;
  3304. __skb_queue_head_init(&reorder_release);
  3305. #define CALL_RXH(rxh) \
  3306. do { \
  3307. res = rxh(rx); \
  3308. if (res != RX_CONTINUE) \
  3309. goto rxh_next; \
  3310. } while (0)
  3311. CALL_RXH(ieee80211_rx_h_check_dup);
  3312. CALL_RXH(ieee80211_rx_h_check);
  3313. ieee80211_rx_reorder_ampdu(rx, &reorder_release);
  3314. ieee80211_rx_handlers(rx, &reorder_release);
  3315. return;
  3316. rxh_next:
  3317. ieee80211_rx_handlers_result(rx, res);
  3318. #undef CALL_RXH
  3319. }
  3320. /*
  3321. * This function makes calls into the RX path, therefore
  3322. * it has to be invoked under RCU read lock.
  3323. */
  3324. void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
  3325. {
  3326. struct sk_buff_head frames;
  3327. struct ieee80211_rx_data rx = {
  3328. .sta = sta,
  3329. .sdata = sta->sdata,
  3330. .local = sta->local,
  3331. /* This is OK -- must be QoS data frame */
  3332. .security_idx = tid,
  3333. .seqno_idx = tid,
  3334. };
  3335. struct tid_ampdu_rx *tid_agg_rx;
  3336. tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
  3337. if (!tid_agg_rx)
  3338. return;
  3339. __skb_queue_head_init(&frames);
  3340. spin_lock(&tid_agg_rx->reorder_lock);
  3341. ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
  3342. spin_unlock(&tid_agg_rx->reorder_lock);
  3343. if (!skb_queue_empty(&frames)) {
  3344. struct ieee80211_event event = {
  3345. .type = BA_FRAME_TIMEOUT,
  3346. .u.ba.tid = tid,
  3347. .u.ba.sta = &sta->sta,
  3348. };
  3349. drv_event_callback(rx.local, rx.sdata, &event);
  3350. }
  3351. ieee80211_rx_handlers(&rx, &frames);
  3352. }
  3353. void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
  3354. u16 ssn, u64 filtered,
  3355. u16 received_mpdus)
  3356. {
  3357. struct sta_info *sta;
  3358. struct tid_ampdu_rx *tid_agg_rx;
  3359. struct sk_buff_head frames;
  3360. struct ieee80211_rx_data rx = {
  3361. /* This is OK -- must be QoS data frame */
  3362. .security_idx = tid,
  3363. .seqno_idx = tid,
  3364. };
  3365. int i, diff;
  3366. if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
  3367. return;
  3368. __skb_queue_head_init(&frames);
  3369. sta = container_of(pubsta, struct sta_info, sta);
  3370. rx.sta = sta;
  3371. rx.sdata = sta->sdata;
  3372. rx.local = sta->local;
  3373. rcu_read_lock();
  3374. tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
  3375. if (!tid_agg_rx)
  3376. goto out;
  3377. spin_lock_bh(&tid_agg_rx->reorder_lock);
  3378. if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
  3379. int release;
  3380. /* release all frames in the reorder buffer */
  3381. release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
  3382. IEEE80211_SN_MODULO;
  3383. ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
  3384. release, &frames);
  3385. /* update ssn to match received ssn */
  3386. tid_agg_rx->head_seq_num = ssn;
  3387. } else {
  3388. ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
  3389. &frames);
  3390. }
  3391. /* handle the case that received ssn is behind the mac ssn.
  3392. * it can be tid_agg_rx->buf_size behind and still be valid */
  3393. diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
  3394. if (diff >= tid_agg_rx->buf_size) {
  3395. tid_agg_rx->reorder_buf_filtered = 0;
  3396. goto release;
  3397. }
  3398. filtered = filtered >> diff;
  3399. ssn += diff;
  3400. /* update bitmap */
  3401. for (i = 0; i < tid_agg_rx->buf_size; i++) {
  3402. int index = (ssn + i) % tid_agg_rx->buf_size;
  3403. tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
  3404. if (filtered & BIT_ULL(i))
  3405. tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
  3406. }
  3407. /* now process also frames that the filter marking released */
  3408. ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
  3409. release:
  3410. spin_unlock_bh(&tid_agg_rx->reorder_lock);
  3411. ieee80211_rx_handlers(&rx, &frames);
  3412. out:
  3413. rcu_read_unlock();
  3414. }
  3415. EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
  3416. /* main receive path */
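/*
 * Decide whether this interface should process the frame at all, based
 * on interface type, BSSID and receiver address: stations accept
 * multicast and frames addressed to themselves, APs additionally accept
 * public action frames and beacons even from foreign BSSIDs, and P2P
 * device interfaces only take public action, probe and beacon frames.
 */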
  3417. static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
  3418. {
  3419. struct ieee80211_sub_if_data *sdata = rx->sdata;
  3420. struct sk_buff *skb = rx->skb;
  3421. struct ieee80211_hdr *hdr = (void *)skb->data;
  3422. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  3423. u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
  3424. bool multicast = is_multicast_ether_addr(hdr->addr1) ||
  3425. ieee80211_is_s1g_beacon(hdr->frame_control);
  3426. switch (sdata->vif.type) {
  3427. case NL80211_IFTYPE_STATION:
  3428. if (!bssid && !sdata->u.mgd.use_4addr)
  3429. return false;
  3430. if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
  3431. return false;
  3432. if (multicast)
  3433. return true;
  3434. return ether_addr_equal(sdata->vif.addr, hdr->addr1);
  3435. case NL80211_IFTYPE_ADHOC:
  3436. if (!bssid)
  3437. return false;
  3438. if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
  3439. ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
  3440. !is_valid_ether_addr(hdr->addr2))
  3441. return false;
  3442. if (ieee80211_is_beacon(hdr->frame_control))
  3443. return true;
  3444. if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
  3445. return false;
  3446. if (!multicast &&
  3447. !ether_addr_equal(sdata->vif.addr, hdr->addr1))
  3448. return false;
  3449. if (!rx->sta) {
  3450. int rate_idx;
  3451. if (status->encoding != RX_ENC_LEGACY)
  3452. rate_idx = 0; /* TODO: HT/VHT rates */
  3453. else
  3454. rate_idx = status->rate_idx;
  3455. ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
  3456. BIT(rate_idx));
  3457. }
  3458. return true;
  3459. case NL80211_IFTYPE_OCB:
  3460. if (!bssid)
  3461. return false;
  3462. if (!ieee80211_is_data_present(hdr->frame_control))
  3463. return false;
  3464. if (!is_broadcast_ether_addr(bssid))
  3465. return false;
  3466. if (!multicast &&
  3467. !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
  3468. return false;
  3469. if (!rx->sta) {
  3470. int rate_idx;
  3471. if (status->encoding != RX_ENC_LEGACY)
  3472. rate_idx = 0; /* TODO: HT rates */
  3473. else
  3474. rate_idx = status->rate_idx;
  3475. ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
  3476. BIT(rate_idx));
  3477. }
  3478. return true;
  3479. case NL80211_IFTYPE_MESH_POINT:
  3480. if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
  3481. return false;
  3482. if (multicast)
  3483. return true;
  3484. return ether_addr_equal(sdata->vif.addr, hdr->addr1);
  3485. case NL80211_IFTYPE_AP_VLAN:
  3486. case NL80211_IFTYPE_AP:
  3487. if (!bssid)
  3488. return ether_addr_equal(sdata->vif.addr, hdr->addr1);
  3489. if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
  3490. /*
  3491. * Accept public action frames even when the
  3492. * BSSID doesn't match, this is used for P2P
  3493. * and location updates. Note that mac80211
  3494. * itself never looks at these frames.
  3495. */
  3496. if (!multicast &&
  3497. !ether_addr_equal(sdata->vif.addr, hdr->addr1))
  3498. return false;
  3499. if (ieee80211_is_public_action(hdr, skb->len))
  3500. return true;
  3501. return ieee80211_is_beacon(hdr->frame_control);
  3502. }
  3503. if (!ieee80211_has_tods(hdr->frame_control)) {
  3504. /* ignore data frames to TDLS-peers */
  3505. if (ieee80211_is_data(hdr->frame_control))
  3506. return false;
  3507. /* ignore action frames to TDLS-peers */
  3508. if (ieee80211_is_action(hdr->frame_control) &&
  3509. !is_broadcast_ether_addr(bssid) &&
  3510. !ether_addr_equal(bssid, hdr->addr1))
  3511. return false;
  3512. }
  3513. /*
  3514. * 802.11-2016 Table 9-26 says that for data frames, A1 must be
  3515. * the BSSID - we've checked that already but may have accepted
  3516. * the wildcard (ff:ff:ff:ff:ff:ff).
  3517. *
  3518. * It also says:
  3519. * The BSSID of the Data frame is determined as follows:
  3520. * a) If the STA is contained within an AP or is associated
  3521. * with an AP, the BSSID is the address currently in use
  3522. * by the STA contained in the AP.
  3523. *
  3524. * So we should not accept data frames with an address that's
  3525. * multicast.
  3526. *
  3527. * Accepting it also opens a security problem because stations
  3528. * could encrypt it with the GTK and inject traffic that way.
  3529. */
  3530. if (ieee80211_is_data(hdr->frame_control) && multicast)
  3531. return false;
  3532. return true;
  3533. case NL80211_IFTYPE_WDS:
  3534. if (bssid || !ieee80211_is_data(hdr->frame_control))
  3535. return false;
  3536. return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
  3537. case NL80211_IFTYPE_P2P_DEVICE:
  3538. return ieee80211_is_public_action(hdr, skb->len) ||
  3539. ieee80211_is_probe_req(hdr->frame_control) ||
  3540. ieee80211_is_probe_resp(hdr->frame_control) ||
  3541. ieee80211_is_beacon(hdr->frame_control);
  3542. case NL80211_IFTYPE_NAN:
  3543. /* Currently no frames on NAN interface are allowed */
  3544. return false;
  3545. default:
  3546. break;
  3547. }
  3548. WARN_ON_ONCE(1);
  3549. return false;
  3550. }
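/*
 * (Re)compute the fast-rx cache for a station.  Fast-rx is only enabled
 * for the simple cases: no reordering needed from mac80211, no software
 * powersave handling, an authorized station, and either no unicast key
 * or a CCMP/GCMP key.  The resulting template is published to the RX
 * path via RCU.
 */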
  3551. void ieee80211_check_fast_rx(struct sta_info *sta)
  3552. {
  3553. struct ieee80211_sub_if_data *sdata = sta->sdata;
  3554. struct ieee80211_local *local = sdata->local;
  3555. struct ieee80211_key *key;
  3556. struct ieee80211_fast_rx fastrx = {
  3557. .dev = sdata->dev,
  3558. .vif_type = sdata->vif.type,
  3559. .control_port_protocol = sdata->control_port_protocol,
  3560. }, *old, *new = NULL;
  3561. bool assign = false;
  3562. /* use sparse to check that we don't return without updating */
  3563. __acquire(check_fast_rx);
  3564. BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
  3565. BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
  3566. ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
  3567. ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
  3568. fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
  3569. /* fast-rx doesn't do reordering */
  3570. if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
  3571. !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
  3572. goto clear;
  3573. switch (sdata->vif.type) {
  3574. case NL80211_IFTYPE_STATION:
  3575. if (sta->sta.tdls) {
  3576. fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
  3577. fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
  3578. fastrx.expected_ds_bits = 0;
  3579. } else {
  3580. fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
  3581. fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
  3582. fastrx.expected_ds_bits =
  3583. cpu_to_le16(IEEE80211_FCTL_FROMDS);
  3584. }
  3585. if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
  3586. fastrx.expected_ds_bits |=
  3587. cpu_to_le16(IEEE80211_FCTL_TODS);
  3588. fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
  3589. fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
  3590. }
  3591. if (!sdata->u.mgd.powersave)
  3592. break;
  3593. /* software powersave is a huge mess, avoid all of it */
  3594. if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
  3595. goto clear;
  3596. if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
  3597. !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
  3598. goto clear;
  3599. break;
  3600. case NL80211_IFTYPE_AP_VLAN:
  3601. case NL80211_IFTYPE_AP:
  3602. /* parallel-rx requires this, at least with calls to
  3603. * ieee80211_sta_ps_transition()
  3604. */
  3605. if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
  3606. goto clear;
  3607. fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
  3608. fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
  3609. fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
  3610. fastrx.internal_forward =
  3611. !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
  3612. (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
  3613. !sdata->u.vlan.sta);
  3614. if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
  3615. sdata->u.vlan.sta) {
  3616. fastrx.expected_ds_bits |=
  3617. cpu_to_le16(IEEE80211_FCTL_FROMDS);
  3618. fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
  3619. fastrx.internal_forward = 0;
  3620. }
  3621. break;
  3622. default:
  3623. goto clear;
  3624. }
  3625. if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
  3626. goto clear;
  3627. rcu_read_lock();
  3628. key = rcu_dereference(sta->ptk[sta->ptk_idx]);
  3629. if (!key)
  3630. key = rcu_dereference(sdata->default_unicast_key);
  3631. if (key) {
  3632. switch (key->conf.cipher) {
  3633. case WLAN_CIPHER_SUITE_TKIP:
  3634. /* we don't want to deal with MMIC in fast-rx */
  3635. goto clear_rcu;
  3636. case WLAN_CIPHER_SUITE_CCMP:
  3637. case WLAN_CIPHER_SUITE_CCMP_256:
  3638. case WLAN_CIPHER_SUITE_GCMP:
  3639. case WLAN_CIPHER_SUITE_GCMP_256:
  3640. break;
  3641. default:
  3642. /* We also don't want to deal with
  3643. * WEP or cipher scheme.
  3644. */
  3645. goto clear_rcu;
  3646. }
  3647. fastrx.key = true;
  3648. fastrx.icv_len = key->conf.icv_len;
  3649. }
  3650. assign = true;
  3651. clear_rcu:
  3652. rcu_read_unlock();
  3653. clear:
  3654. __release(check_fast_rx);
  3655. if (assign)
  3656. new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
  3657. spin_lock_bh(&sta->lock);
  3658. old = rcu_dereference_protected(sta->fast_rx, true);
  3659. rcu_assign_pointer(sta->fast_rx, new);
  3660. spin_unlock_bh(&sta->lock);
  3661. if (old)
  3662. kfree_rcu(old, rcu_head);
  3663. }
  3664. void ieee80211_clear_fast_rx(struct sta_info *sta)
  3665. {
  3666. struct ieee80211_fast_rx *old;
  3667. spin_lock_bh(&sta->lock);
  3668. old = rcu_dereference_protected(sta->fast_rx, true);
  3669. RCU_INIT_POINTER(sta->fast_rx, NULL);
  3670. spin_unlock_bh(&sta->lock);
  3671. if (old)
  3672. kfree_rcu(old, rcu_head);
  3673. }
  3674. void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
  3675. {
  3676. struct ieee80211_local *local = sdata->local;
  3677. struct sta_info *sta;
  3678. lockdep_assert_held(&local->sta_mtx);
  3679. list_for_each_entry(sta, &local->sta_list, list) {
  3680. if (sdata != sta->sdata &&
  3681. (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
  3682. continue;
  3683. ieee80211_check_fast_rx(sta);
  3684. }
  3685. }
  3686. void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
  3687. {
  3688. struct ieee80211_local *local = sdata->local;
  3689. mutex_lock(&local->sta_mtx);
  3690. __ieee80211_check_fast_rx_iface(sdata);
  3691. mutex_unlock(&local->sta_mtx);
  3692. }
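/*
 * The fast-rx path itself.  Returning false before the "don't punt"
 * point sends the frame through the regular handlers instead.  Fast-rx
 * relies on the driver having done duplicate detection and, when a key
 * is in use, decryption and PN validation; it then checks the DS bits
 * and addresses against the cached template, strips IV/ICV and SNAP,
 * rewrites the header to 802.3 in place, updates the statistics,
 * optionally bridges within the BSS and hands the frame to the stack.
 */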
  3693. static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
  3694. struct ieee80211_fast_rx *fast_rx)
  3695. {
  3696. struct sk_buff *skb = rx->skb;
  3697. struct ieee80211_hdr *hdr = (void *)skb->data;
  3698. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  3699. struct sta_info *sta = rx->sta;
  3700. int orig_len = skb->len;
  3701. int hdrlen = ieee80211_hdrlen(hdr->frame_control);
  3702. int snap_offs = hdrlen;
  3703. struct {
  3704. u8 snap[sizeof(rfc1042_header)];
  3705. __be16 proto;
  3706. } *payload __aligned(2);
  3707. struct {
  3708. u8 da[ETH_ALEN];
  3709. u8 sa[ETH_ALEN];
  3710. } addrs __aligned(2);
  3711. struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
  3712. if (fast_rx->uses_rss)
  3713. stats = this_cpu_ptr(sta->pcpu_rx_stats);
  3714. /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
  3715. * to a common data structure; drivers can implement that per queue
  3716. * but we don't have that information in mac80211
  3717. */
  3718. if (!(status->flag & RX_FLAG_DUP_VALIDATED))
  3719. return false;
  3720. #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
  3721. /* If using encryption, we also need to have:
  3722. * - PN_VALIDATED: similar, but the implementation is tricky
  3723. * - DECRYPTED: necessary for PN_VALIDATED
  3724. */
  3725. if (fast_rx->key &&
  3726. (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
  3727. return false;
  3728. if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
  3729. return false;
  3730. if (unlikely(ieee80211_is_frag(hdr)))
  3731. return false;
  3732. /* Since our interface address cannot be multicast, this
  3733. * implicitly also rejects multicast frames without the
  3734. * explicit check.
  3735. *
  3736. * We shouldn't get any *data* frames not addressed to us
  3737. * (AP mode will accept multicast *management* frames), but
  3738. * punting here will make it go through the full checks in
  3739. * ieee80211_accept_frame().
  3740. */
  3741. if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
  3742. return false;
  3743. if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
  3744. IEEE80211_FCTL_TODS)) !=
  3745. fast_rx->expected_ds_bits)
  3746. return false;
  3747. /* assign the key to drop unencrypted frames (later)
  3748. * and strip the IV/MIC if necessary
  3749. */
  3750. if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
  3751. /* GCMP header length is the same */
  3752. snap_offs += IEEE80211_CCMP_HDR_LEN;
  3753. }
  3754. if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
  3755. if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
  3756. goto drop;
  3757. payload = (void *)(skb->data + snap_offs);
  3758. if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
  3759. return false;
  3760. /* Don't handle these here since they require special code.
  3761. * Accept AARP and IPX even though they should come with a
  3762. * bridge-tunnel header - but if we get them this way then
  3763. * there's little point in discarding them.
  3764. */
  3765. if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
  3766. payload->proto == fast_rx->control_port_protocol))
  3767. return false;
  3768. }
  3769. /* after this point, don't punt to the slowpath! */
  3770. if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
  3771. pskb_trim(skb, skb->len - fast_rx->icv_len))
  3772. goto drop;
  3773. /* statistics part of ieee80211_rx_h_sta_process() */
  3774. if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
  3775. stats->last_signal = status->signal;
  3776. if (!fast_rx->uses_rss)
  3777. ewma_signal_add(&sta->rx_stats_avg.signal,
  3778. -status->signal);
  3779. }
  3780. if (status->chains) {
  3781. int i;
  3782. stats->chains = status->chains;
  3783. for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
  3784. int signal = status->chain_signal[i];
  3785. if (!(status->chains & BIT(i)))
  3786. continue;
  3787. stats->chain_signal_last[i] = signal;
  3788. if (!fast_rx->uses_rss)
  3789. ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
  3790. -signal);
  3791. }
  3792. }
  3793. /* end of statistics */
  3794. if (rx->key && !ieee80211_has_protected(hdr->frame_control))
  3795. goto drop;
  3796. if (status->rx_flags & IEEE80211_RX_AMSDU) {
  3797. if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
  3798. RX_QUEUED)
  3799. goto drop;
  3800. return true;
  3801. }
  3802. stats->last_rx = jiffies;
  3803. stats->last_rate = sta_stats_encode_rate(status);
  3804. stats->fragments++;
  3805. stats->packets++;
  3806. /* do the header conversion - first grab the addresses */
  3807. ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
  3808. ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
  3809. /* remove the SNAP but leave the ethertype */
  3810. skb_pull(skb, snap_offs + sizeof(rfc1042_header));
  3811. /* push the addresses in front */
  3812. memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
  3813. skb->dev = fast_rx->dev;
  3814. ieee80211_rx_stats(fast_rx->dev, skb->len);
  3815. /* The seqno index has the same property as needed
  3816. * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
  3817. * for non-QoS-data frames. Here we know it's a data
  3818. * frame, so count MSDUs.
  3819. */
  3820. u64_stats_update_begin(&stats->syncp);
  3821. stats->msdu[rx->seqno_idx]++;
  3822. stats->bytes += orig_len;
  3823. u64_stats_update_end(&stats->syncp);
  3824. if (fast_rx->internal_forward) {
  3825. struct sk_buff *xmit_skb = NULL;
  3826. if (is_multicast_ether_addr(addrs.da)) {
  3827. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  3828. } else if (!ether_addr_equal(addrs.da, addrs.sa) &&
  3829. sta_info_get(rx->sdata, addrs.da)) {
  3830. xmit_skb = skb;
  3831. skb = NULL;
  3832. }
  3833. if (xmit_skb) {
  3834. /*
  3835. * Send to wireless media and increase priority by 256
  3836. * to keep the received priority instead of
  3837. * reclassifying the frame (see cfg80211_classify8021d).
  3838. */
  3839. xmit_skb->priority += 256;
  3840. xmit_skb->protocol = htons(ETH_P_802_3);
  3841. skb_reset_network_header(xmit_skb);
  3842. skb_reset_mac_header(xmit_skb);
  3843. dev_queue_xmit(xmit_skb);
  3844. }
  3845. if (!skb)
  3846. return true;
  3847. }
  3848. /* deliver to local stack */
  3849. skb->protocol = eth_type_trans(skb, fast_rx->dev);
  3850. memset(skb->cb, 0, sizeof(skb->cb));
  3851. if (rx->list)
  3852. list_add_tail(&skb->list, rx->list);
  3853. else
  3854. netif_receive_skb(skb);
  3855. return true;
  3856. drop:
  3857. dev_kfree_skb(skb);
  3858. stats->dropped++;
  3859. return true;
  3860. }
/*
 * This function returns whether the SKB was destined for RX processing,
 * which, if consume is true, is equivalent to whether the skb was
 * consumed.
 */
static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
					    struct sk_buff *skb, bool consume)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;

	rx->skb = skb;

	/* See if we can do fast-rx; if we have to copy we already lost,
	 * so punt in that case. We should never have to deliver a data
	 * frame to multiple interfaces anyway.
	 *
	 * We skip the ieee80211_accept_frame() call and do the necessary
	 * checking inside ieee80211_invoke_fast_rx().
	 */
	if (consume && rx->sta) {
		struct ieee80211_fast_rx *fast_rx;

		fast_rx = rcu_dereference(rx->sta->fast_rx);
		if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
			return true;
	}

	if (!ieee80211_accept_frame(rx))
		return false;

	if (!consume) {
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit())
				wiphy_debug(local->hw.wiphy,
					    "failed to copy skb for %s\n",
					    sdata->name);
			return true;
		}

		rx->skb = skb;
	}

	ieee80211_invoke_rx_handlers(rx);
	return true;
}

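/*
 * Note on the !consume path above: the frame is duplicated with skb_copy()
 * so the caller's skb is left untouched; if the copy fails, the frame is
 * simply not processed for that interface and only the rate-limited debug
 * message records it.
 */
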
/*
 * This is the actual Rx frames handler. As it belongs to the Rx path, it
 * must be called with rcu_read_lock protection.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
					 struct ieee80211_sta *pubsta,
					 struct sk_buff *skb,
					 struct list_head *list)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_rx_data rx;
	struct ieee80211_sub_if_data *prev;
	struct rhlist_head *tmp;
	int err = 0;

	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
	memset(&rx, 0, sizeof(rx));
	rx.skb = skb;
	rx.local = local;
	rx.list = list;

	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);

	if (ieee80211_is_mgmt(fc)) {
		/* drop frame if too short for header */
		if (skb->len < ieee80211_hdrlen(fc))
			err = -ENOBUFS;
		else
			err = skb_linearize(skb);
	} else {
		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
	}

	if (err) {
		dev_kfree_skb(skb);
		return;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	ieee80211_parse_qos(&rx);
	ieee80211_verify_alignment(&rx);

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
		     ieee80211_is_beacon(hdr->frame_control) ||
		     ieee80211_is_s1g_beacon(hdr->frame_control)))
		ieee80211_scan_rx(local, skb);

	if (ieee80211_is_data(fc)) {
		struct sta_info *sta, *prev_sta;

		if (pubsta) {
			rx.sta = container_of(pubsta, struct sta_info, sta);
			rx.sdata = rx.sta->sdata;

			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
				return;
			goto out;
		}

		prev_sta = NULL;

		for_each_sta_info(local, hdr->addr2, sta, tmp) {
			if (!prev_sta) {
				prev_sta = sta;
				continue;
			}

			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;
			ieee80211_prepare_and_rx_handle(&rx, skb, false);

			prev_sta = sta;
		}

		if (prev_sta) {
			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;

			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
				return;
			goto out;
		}
	}

	prev = NULL;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;
		/*
		 * frame is destined for this interface, but if it's
		 * not also for the previous one we handle that after
		 * the loop to avoid copying the SKB one time too many
		 */
		if (!prev) {
			prev = sdata;
			continue;
		}

		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;
		ieee80211_prepare_and_rx_handle(&rx, skb, false);

		prev = sdata;
	}

	if (prev) {
		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;

		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
			return;
	}

 out:
	dev_kfree_skb(skb);
}

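/*
 * Note: for data frames the handler above tries the station reported by
 * the driver, or else every station entry matching addr2; only if no
 * matching station was found (or for non-data frames) is the frame offered
 * to each running interface. A consuming call that returns true takes
 * ownership of the skb; otherwise the frame is freed at the "out" label.
 */
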
/*
 * This is the receive path handler. It is called by a low-level driver
 * when an 802.11 MPDU is received from the hardware.
 */
void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
		       struct sk_buff *skb, struct list_head *list)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	WARN_ON_ONCE(softirq_count() == 0);

	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
		goto drop;

	sband = local->hw.wiphy->bands[status->band];
	if (WARN_ON(!sband))
		goto drop;

	/*
	 * If we're suspending, it is possible although not too likely
	 * that we'd be receiving frames after having already partially
	 * quiesced the stack. We can't process such frames then since
	 * that might, for example, cause stations to be added or other
	 * driver callbacks to be invoked.
	 */
	if (unlikely(local->quiescing || local->suspended))
		goto drop;
	/* We might be in the middle of a HW reconfig; prevent Rx for the
	 * same reason.
	 */
	if (unlikely(local->in_reconfig))
		goto drop;
	/*
	 * The same happens when we're not even started,
	 * but that's worth a warning.
	 */
	if (WARN_ON(!local->started))
		goto drop;

	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
		/*
		 * Validate the rate, unless a PLCP error means that
		 * we probably can't have a valid rate here anyway.
		 */

		switch (status->encoding) {
		case RX_ENC_HT:
			/*
			 * rate_idx is MCS index, which can be [0-76]
			 * as documented on:
			 *
			 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
			 *
			 * Anything else would be some sort of driver or
			 * hardware error. The driver should catch hardware
			 * errors.
			 */
			if (WARN(status->rate_idx > 76,
				 "Rate marked as an HT rate but passed "
				 "status->rate_idx is not "
				 "an MCS index [0-76]: %d (0x%02x)\n",
				 status->rate_idx,
				 status->rate_idx))
				goto drop;
			break;
		case RX_ENC_VHT:
			if (WARN_ONCE(status->rate_idx > 11 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		case RX_ENC_HE:
			if (WARN_ONCE(status->rate_idx > 11 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		default:
			WARN_ON_ONCE(1);
			fallthrough;
		case RX_ENC_LEGACY:
			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
				goto drop;
			rate = &sband->bitrates[status->rate_idx];
		}
	}

	status->rx_flags = 0;
	/*
	 * Frames with failed FCS/PLCP checksum are not returned,
	 * all other frames are returned without radiotap header
	 * if it was previously present.
	 * Also, frames with less than 16 bytes are dropped.
	 */
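	/* ieee80211_rx_monitor() returns NULL when it consumed or dropped
	 * the frame (e.g. bad FCS, too short, or only meant for monitor
	 * interfaces), in which case there is nothing left to do here.
	 */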
	skb = ieee80211_rx_monitor(local, skb, rate);
	if (!skb)
		return;

	ieee80211_tpt_led_trig_rx(local,
			((struct ieee80211_hdr *)skb->data)->frame_control,
			skb->len);

	__ieee80211_rx_handle_packet(hw, pubsta, skb, list);

	return;
 drop:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx_list);

void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
		       struct sk_buff *skb, struct napi_struct *napi)
{
	struct sk_buff *tmp;
	LIST_HEAD(list);

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();
	ieee80211_rx_list(hw, pubsta, skb, &list);
	rcu_read_unlock();

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
EXPORT_SYMBOL(ieee80211_rx_napi);

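/*
 * Illustrative example (not part of this file, field values are made up):
 * a driver's NAPI poll routine would typically fill in the RX status kept
 * in skb->cb and then hand the frame to mac80211 roughly like this:
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = NL80211_BAND_5GHZ;
 *	status->freq = 5180;
 *	status->signal = -55;
 *	status->encoding = RX_ENC_LEGACY;
 *	status->rate_idx = 0;
 *
 *	ieee80211_rx_napi(hw, NULL, skb, napi);
 */
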
/* This is a version of the rx handler that can be called from hard irq
 * context. Post the skb on the queue and schedule the tasklet */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);

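/*
 * Note: as with the other ieee80211_rx*() entry points, the driver must
 * have filled in the ieee80211_rx_status in skb->cb (IEEE80211_SKB_RXCB)
 * before calling ieee80211_rx_irqsafe(); the BUILD_BUG_ON above only
 * checks that the status structure still fits into the cb area.
 */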