bcm.c

  1. // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  2. /*
  3. * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
  4. *
  5. * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
  6. * All rights reserved.
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions
  10. * are met:
  11. * 1. Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. * 3. Neither the name of Volkswagen nor the names of its contributors
  17. * may be used to endorse or promote products derived from this software
  18. * without specific prior written permission.
  19. *
  20. * Alternatively, provided that this notice is retained in full, this
  21. * software may be distributed under the terms of the GNU General
  22. * Public License ("GPL") version 2, in which case the provisions of the
  23. * GPL apply INSTEAD OF those given above.
  24. *
  25. * The provided data structures and external interfaces from this code
  26. * are not restricted to be used by modules with a GPL compatible license.
  27. *
  28. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  29. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  30. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  31. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  32. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  33. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  34. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  35. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  36. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  37. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  38. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  39. * DAMAGE.
  40. *
  41. */
  42. #include <linux/module.h>
  43. #include <linux/init.h>
  44. #include <linux/interrupt.h>
  45. #include <linux/hrtimer.h>
  46. #include <linux/list.h>
  47. #include <linux/proc_fs.h>
  48. #include <linux/seq_file.h>
  49. #include <linux/uio.h>
  50. #include <linux/net.h>
  51. #include <linux/netdevice.h>
  52. #include <linux/socket.h>
  53. #include <linux/if_arp.h>
  54. #include <linux/skbuff.h>
  55. #include <linux/can.h>
  56. #include <linux/can/core.h>
  57. #include <linux/can/skb.h>
  58. #include <linux/can/bcm.h>
  59. #include <linux/slab.h>
  60. #include <net/sock.h>
  61. #include <net/net_namespace.h>
  62. /*
  63. * To send multiple CAN frame content within TX_SETUP or to filter
  64. * CAN messages with multiplex index within RX_SETUP, the number of
  65. * different filters is limited to 256 due to the one byte index value.
  66. */
  67. #define MAX_NFRAMES 256
  68. /* limit timers to 400 days for sending/timeouts */
  69. #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
  70. /* use of last_frames[index].flags */
  71. #define RX_RECV 0x40 /* received data for this element */
  72. #define RX_THR 0x80 /* element has not been sent due to the throttle feature */
  73. #define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
  74. /* get best masking value for can_rx_register() for a given single can_id */
  75. #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
  76. (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
  77. (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
  78. MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
  79. MODULE_LICENSE("Dual BSD/GPL");
  80. MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
  81. MODULE_ALIAS("can-proto-2");
  82. #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
  83. /*
  84. * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
  85. * 64 bit aligned so the offset has to be a multiple of 8, which is ensured
  86. * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
  87. */
  88. static inline u64 get_u64(const struct canfd_frame *cp, int offset)
  89. {
  90. return *(u64 *)(cp->data + offset);
  91. }
  92. struct bcm_op {
  93. struct list_head list;
  94. int ifindex;
  95. canid_t can_id;
  96. u32 flags;
  97. unsigned long frames_abs, frames_filtered;
  98. struct bcm_timeval ival1, ival2;
  99. struct hrtimer timer, thrtimer;
  100. ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
  101. int rx_ifindex;
  102. int cfsiz;
  103. u32 count;
  104. u32 nframes;
  105. u32 currframe;
  106. /* void pointers to arrays of struct can[fd]_frame */
  107. void *frames;
  108. void *last_frames;
  109. struct canfd_frame sframe;
  110. struct canfd_frame last_sframe;
  111. struct sock *sk;
  112. struct net_device *rx_reg_dev;
  113. };
  114. struct bcm_sock {
  115. struct sock sk;
  116. int bound;
  117. int ifindex;
  118. struct list_head notifier;
  119. struct list_head rx_ops;
  120. struct list_head tx_ops;
  121. unsigned long dropped_usr_msgs;
  122. struct proc_dir_entry *bcm_proc_read;
  123. char procname[32]; /* inode number in decimal with \0 */
  124. };
  125. static LIST_HEAD(bcm_notifier_list);
  126. static DEFINE_SPINLOCK(bcm_notifier_lock);
  127. static struct bcm_sock *bcm_busy_notifier;
  128. static inline struct bcm_sock *bcm_sk(const struct sock *sk)
  129. {
  130. return (struct bcm_sock *)sk;
  131. }
  132. static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
  133. {
  134. return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
  135. }
  136. /* check limitations for timeval provided by user */
  137. static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
  138. {
  139. if ((msg_head->ival1.tv_sec < 0) ||
  140. (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
  141. (msg_head->ival1.tv_usec < 0) ||
  142. (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
  143. (msg_head->ival2.tv_sec < 0) ||
  144. (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
  145. (msg_head->ival2.tv_usec < 0) ||
  146. (msg_head->ival2.tv_usec >= USEC_PER_SEC))
  147. return true;
  148. return false;
  149. }
  150. #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
  151. #define OPSIZ sizeof(struct bcm_op)
  152. #define MHSIZ sizeof(struct bcm_msg_head)
  153. /*
  154. * procfs functions
  155. */
  156. #if IS_ENABLED(CONFIG_PROC_FS)
  157. static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
  158. {
  159. struct net_device *dev;
  160. if (!ifindex)
  161. return "any";
  162. rcu_read_lock();
  163. dev = dev_get_by_index_rcu(net, ifindex);
  164. if (dev)
  165. strcpy(result, dev->name);
  166. else
  167. strcpy(result, "???");
  168. rcu_read_unlock();
  169. return result;
  170. }
  171. static int bcm_proc_show(struct seq_file *m, void *v)
  172. {
  173. char ifname[IFNAMSIZ];
  174. struct net *net = m->private;
  175. struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
  176. struct bcm_sock *bo = bcm_sk(sk);
  177. struct bcm_op *op;
  178. seq_printf(m, ">>> socket %pK", sk->sk_socket);
  179. seq_printf(m, " / sk %pK", sk);
  180. seq_printf(m, " / bo %pK", bo);
  181. seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
  182. seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
  183. seq_printf(m, " <<<\n");
  184. list_for_each_entry(op, &bo->rx_ops, list) {
  185. unsigned long reduction;
  186. /* print only active entries & prevent division by zero */
  187. if (!op->frames_abs)
  188. continue;
  189. seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
  190. bcm_proc_getifname(net, ifname, op->ifindex));
  191. if (op->flags & CAN_FD_FRAME)
  192. seq_printf(m, "(%u)", op->nframes);
  193. else
  194. seq_printf(m, "[%u]", op->nframes);
  195. seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
  196. if (op->kt_ival1)
  197. seq_printf(m, "timeo=%lld ",
  198. (long long)ktime_to_us(op->kt_ival1));
  199. if (op->kt_ival2)
  200. seq_printf(m, "thr=%lld ",
  201. (long long)ktime_to_us(op->kt_ival2));
  202. seq_printf(m, "# recv %ld (%ld) => reduction: ",
  203. op->frames_filtered, op->frames_abs);
  204. reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
  205. seq_printf(m, "%s%ld%%\n",
  206. (reduction == 100) ? "near " : "", reduction);
  207. }
  208. list_for_each_entry(op, &bo->tx_ops, list) {
  209. seq_printf(m, "tx_op: %03X %s ", op->can_id,
  210. bcm_proc_getifname(net, ifname, op->ifindex));
  211. if (op->flags & CAN_FD_FRAME)
  212. seq_printf(m, "(%u) ", op->nframes);
  213. else
  214. seq_printf(m, "[%u] ", op->nframes);
  215. if (op->kt_ival1)
  216. seq_printf(m, "t1=%lld ",
  217. (long long)ktime_to_us(op->kt_ival1));
  218. if (op->kt_ival2)
  219. seq_printf(m, "t2=%lld ",
  220. (long long)ktime_to_us(op->kt_ival2));
  221. seq_printf(m, "# sent %ld\n", op->frames_abs);
  222. }
  223. seq_putc(m, '\n');
  224. return 0;
  225. }
  226. #endif /* CONFIG_PROC_FS */
  227. /*
  228. * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
  229. * of the given bcm tx op
  230. */
  231. static void bcm_can_tx(struct bcm_op *op)
  232. {
  233. struct sk_buff *skb;
  234. struct net_device *dev;
  235. struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
  236. /* no target device? => exit */
  237. if (!op->ifindex)
  238. return;
  239. dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
  240. if (!dev) {
  241. /* RFC: should this bcm_op remove itself here? */
  242. return;
  243. }
  244. skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
  245. if (!skb)
  246. goto out;
  247. can_skb_reserve(skb);
  248. can_skb_prv(skb)->ifindex = dev->ifindex;
  249. can_skb_prv(skb)->skbcnt = 0;
  250. skb_put_data(skb, cf, op->cfsiz);
  251. /* send with loopback */
  252. skb->dev = dev;
  253. can_skb_set_owner(skb, op->sk);
  254. can_send(skb, 1);
  255. /* update statistics */
  256. op->currframe++;
  257. op->frames_abs++;
  258. /* reached last frame? */
  259. if (op->currframe >= op->nframes)
  260. op->currframe = 0;
  261. out:
  262. dev_put(dev);
  263. }
  264. /*
  265. * bcm_send_to_user - send a BCM message to the userspace
  266. * (consisting of bcm_msg_head + x CAN frames)
  267. */
  268. static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
  269. struct canfd_frame *frames, int has_timestamp)
  270. {
  271. struct sk_buff *skb;
  272. struct canfd_frame *firstframe;
  273. struct sockaddr_can *addr;
  274. struct sock *sk = op->sk;
  275. unsigned int datalen = head->nframes * op->cfsiz;
  276. int err;
  277. skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
  278. if (!skb)
  279. return;
  280. skb_put_data(skb, head, sizeof(*head));
  281. if (head->nframes) {
  282. /* CAN frames starting here */
  283. firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
  284. skb_put_data(skb, frames, datalen);
  285. /*
  286. * the BCM uses the flags-element of the canfd_frame
  287. * structure for internal purposes. This is only
  288. * relevant for updates that are generated by the
  289. * BCM, where nframes is 1
  290. */
  291. if (head->nframes == 1)
  292. firstframe->flags &= BCM_CAN_FLAGS_MASK;
  293. }
  294. if (has_timestamp) {
  295. /* restore rx timestamp */
  296. skb->tstamp = op->rx_stamp;
  297. }
  298. /*
  299. * Put the datagram to the queue so that bcm_recvmsg() can
  300. * get it from there. We need to pass the interface index to
  301. * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
  302. * containing the interface index.
  303. */
  304. sock_skb_cb_check_size(sizeof(struct sockaddr_can));
  305. addr = (struct sockaddr_can *)skb->cb;
  306. memset(addr, 0, sizeof(*addr));
  307. addr->can_family = AF_CAN;
  308. addr->can_ifindex = op->rx_ifindex;
  309. err = sock_queue_rcv_skb(sk, skb);
  310. if (err < 0) {
  311. struct bcm_sock *bo = bcm_sk(sk);
  312. kfree_skb(skb);
  313. /* don't care about overflows in this statistic */
  314. bo->dropped_usr_msgs++;
  315. }
  316. }
  317. static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
  318. {
  319. ktime_t ival;
  320. if (op->kt_ival1 && op->count)
  321. ival = op->kt_ival1;
  322. else if (op->kt_ival2)
  323. ival = op->kt_ival2;
  324. else
  325. return false;
  326. hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
  327. return true;
  328. }
  329. static void bcm_tx_start_timer(struct bcm_op *op)
  330. {
  331. if (bcm_tx_set_expiry(op, &op->timer))
  332. hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
  333. }
  334. /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
  335. static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
  336. {
  337. struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
  338. struct bcm_msg_head msg_head;
  339. if (op->kt_ival1 && (op->count > 0)) {
  340. op->count--;
  341. if (!op->count && (op->flags & TX_COUNTEVT)) {
  342. /* create notification to user */
  343. memset(&msg_head, 0, sizeof(msg_head));
  344. msg_head.opcode = TX_EXPIRED;
  345. msg_head.flags = op->flags;
  346. msg_head.count = op->count;
  347. msg_head.ival1 = op->ival1;
  348. msg_head.ival2 = op->ival2;
  349. msg_head.can_id = op->can_id;
  350. msg_head.nframes = 0;
  351. bcm_send_to_user(op, &msg_head, NULL, 0);
  352. }
  353. bcm_can_tx(op);
  354. } else if (op->kt_ival2) {
  355. bcm_can_tx(op);
  356. }
  357. return bcm_tx_set_expiry(op, &op->timer) ?
  358. HRTIMER_RESTART : HRTIMER_NORESTART;
  359. }
  360. /*
  361. * bcm_rx_changed - create a RX_CHANGED notification due to changed content
  362. */
  363. static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
  364. {
  365. struct bcm_msg_head head;
  366. /* update statistics */
  367. op->frames_filtered++;
  368. /* prevent statistics overflow */
  369. if (op->frames_filtered > ULONG_MAX/100)
  370. op->frames_filtered = op->frames_abs = 0;
  371. /* this element is not throttled anymore */
  372. data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
  373. memset(&head, 0, sizeof(head));
  374. head.opcode = RX_CHANGED;
  375. head.flags = op->flags;
  376. head.count = op->count;
  377. head.ival1 = op->ival1;
  378. head.ival2 = op->ival2;
  379. head.can_id = op->can_id;
  380. head.nframes = 1;
  381. bcm_send_to_user(op, &head, data, 1);
  382. }
  383. /*
  384. * bcm_rx_update_and_send - process a detected relevant receive content change
  385. * 1. update the last received data
  386. * 2. send a notification to the user (if possible)
  387. */
  388. static void bcm_rx_update_and_send(struct bcm_op *op,
  389. struct canfd_frame *lastdata,
  390. const struct canfd_frame *rxdata)
  391. {
  392. memcpy(lastdata, rxdata, op->cfsiz);
  393. /* mark as used and throttled by default */
  394. lastdata->flags |= (RX_RECV|RX_THR);
  395. /* throttling mode inactive ? */
  396. if (!op->kt_ival2) {
  397. /* send RX_CHANGED to the user immediately */
  398. bcm_rx_changed(op, lastdata);
  399. return;
  400. }
  401. /* with active throttling timer we are just done here */
  402. if (hrtimer_active(&op->thrtimer))
  403. return;
  404. /* first reception with enabled throttling mode */
  405. if (!op->kt_lastmsg)
  406. goto rx_changed_settime;
  407. /* got a second frame inside a potential throttle period? */
  408. if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
  409. ktime_to_us(op->kt_ival2)) {
  410. /* do not send the saved data - only start throttle timer */
  411. hrtimer_start(&op->thrtimer,
  412. ktime_add(op->kt_lastmsg, op->kt_ival2),
  413. HRTIMER_MODE_ABS_SOFT);
  414. return;
  415. }
  416. /* the gap was big enough that throttling was not needed here */
  417. rx_changed_settime:
  418. bcm_rx_changed(op, lastdata);
  419. op->kt_lastmsg = ktime_get();
  420. }
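/*
 * Worked throttling example (an illustrative sketch, not part of the
 * original code): assume RX_SETUP was done with SETTIMER and a throttle
 * interval ival2 = 100 ms, and that the monitored content changes at
 * t = 0, 30 and 70 ms. The change at t = 0 is sent as RX_CHANGED right
 * away and kt_lastmsg is set. The changes at t = 30 and t = 70 fall
 * inside the 100 ms window, so only last_frames is updated (RX_THR set)
 * and thrtimer is armed for t = 100 ms. When thrtimer fires,
 * bcm_rx_thr_flush() delivers a single RX_CHANGED message carrying the
 * latest content, i.e. the data received at t = 70 ms.
 */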
  421. /*
  422. * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
  423. * received data stored in op->last_frames[]
  424. */
  425. static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
  426. const struct canfd_frame *rxdata)
  427. {
  428. struct canfd_frame *cf = op->frames + op->cfsiz * index;
  429. struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
  430. int i;
  431. /*
  432. * no one uses the MSBs of flags for comparison,
  433. * so we use them here to detect the first time of reception
  434. */
  435. if (!(lcf->flags & RX_RECV)) {
  436. /* received data for the first time => send update to user */
  437. bcm_rx_update_and_send(op, lcf, rxdata);
  438. return;
  439. }
  440. /* do a real check in CAN frame data section */
  441. for (i = 0; i < rxdata->len; i += 8) {
  442. if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
  443. (get_u64(cf, i) & get_u64(lcf, i))) {
  444. bcm_rx_update_and_send(op, lcf, rxdata);
  445. return;
  446. }
  447. }
  448. if (op->flags & RX_CHECK_DLC) {
  449. /* do a real check in CAN frame length */
  450. if (rxdata->len != lcf->len) {
  451. bcm_rx_update_and_send(op, lcf, rxdata);
  452. return;
  453. }
  454. }
  455. }
  456. /*
  457. * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
  458. */
  459. static void bcm_rx_starttimer(struct bcm_op *op)
  460. {
  461. if (op->flags & RX_NO_AUTOTIMER)
  462. return;
  463. if (op->kt_ival1)
  464. hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
  465. }
  466. /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
  467. static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
  468. {
  469. struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
  470. struct bcm_msg_head msg_head;
  471. /* if the user wants to be informed when cyclic CAN messages come back */
  472. if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
  473. /* clear received CAN frames to indicate 'nothing received' */
  474. memset(op->last_frames, 0, op->nframes * op->cfsiz);
  475. }
  476. /* create notification to user */
  477. memset(&msg_head, 0, sizeof(msg_head));
  478. msg_head.opcode = RX_TIMEOUT;
  479. msg_head.flags = op->flags;
  480. msg_head.count = op->count;
  481. msg_head.ival1 = op->ival1;
  482. msg_head.ival2 = op->ival2;
  483. msg_head.can_id = op->can_id;
  484. msg_head.nframes = 0;
  485. bcm_send_to_user(op, &msg_head, NULL, 0);
  486. return HRTIMER_NORESTART;
  487. }
  488. /*
  489. * bcm_rx_do_flush - helper for bcm_rx_thr_flush
  490. */
  491. static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
  492. {
  493. struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
  494. if ((op->last_frames) && (lcf->flags & RX_THR)) {
  495. bcm_rx_changed(op, lcf);
  496. return 1;
  497. }
  498. return 0;
  499. }
  500. /*
  501. * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
  502. */
  503. static int bcm_rx_thr_flush(struct bcm_op *op)
  504. {
  505. int updated = 0;
  506. if (op->nframes > 1) {
  507. unsigned int i;
  508. /* for MUX filter we start at index 1 */
  509. for (i = 1; i < op->nframes; i++)
  510. updated += bcm_rx_do_flush(op, i);
  511. } else {
  512. /* for RX_FILTER_ID and simple filter */
  513. updated += bcm_rx_do_flush(op, 0);
  514. }
  515. return updated;
  516. }
  517. /*
  518. * bcm_rx_thr_handler - the time for blocked content updates is over now:
  519. * Check for throttled data and send it to the userspace
  520. */
  521. static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
  522. {
  523. struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
  524. if (bcm_rx_thr_flush(op)) {
  525. hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
  526. return HRTIMER_RESTART;
  527. } else {
  528. /* rearm throttle handling */
  529. op->kt_lastmsg = 0;
  530. return HRTIMER_NORESTART;
  531. }
  532. }
  533. /*
  534. * bcm_rx_handler - handle a CAN frame reception
  535. */
  536. static void bcm_rx_handler(struct sk_buff *skb, void *data)
  537. {
  538. struct bcm_op *op = (struct bcm_op *)data;
  539. const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
  540. unsigned int i;
  541. if (op->can_id != rxframe->can_id)
  542. return;
  543. /* make sure to handle the correct frame type (CAN / CAN FD) */
  544. if (skb->len != op->cfsiz)
  545. return;
  546. /* disable timeout */
  547. hrtimer_cancel(&op->timer);
  548. /* save rx timestamp */
  549. op->rx_stamp = skb->tstamp;
  550. /* save originator for recvfrom() */
  551. op->rx_ifindex = skb->dev->ifindex;
  552. /* update statistics */
  553. op->frames_abs++;
  554. if (op->flags & RX_RTR_FRAME) {
  555. /* send reply for RTR-request (placed in op->frames[0]) */
  556. bcm_can_tx(op);
  557. return;
  558. }
  559. if (op->flags & RX_FILTER_ID) {
  560. /* the easiest case */
  561. bcm_rx_update_and_send(op, op->last_frames, rxframe);
  562. goto rx_starttimer;
  563. }
  564. if (op->nframes == 1) {
  565. /* simple compare with index 0 */
  566. bcm_rx_cmp_to_index(op, 0, rxframe);
  567. goto rx_starttimer;
  568. }
  569. if (op->nframes > 1) {
  570. /*
  571. * multiplex compare
  572. *
  573. * find the first multiplex mask that fits.
  574. * Remark: The MUX-mask is stored in index 0 - but only the
  575. * first 64 bits of the frame data[] are relevant (CAN FD)
  576. */
  577. for (i = 1; i < op->nframes; i++) {
  578. if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
  579. (get_u64(op->frames, 0) &
  580. get_u64(op->frames + op->cfsiz * i, 0))) {
  581. bcm_rx_cmp_to_index(op, i, rxframe);
  582. break;
  583. }
  584. }
  585. }
  586. rx_starttimer:
  587. bcm_rx_starttimer(op);
  588. }
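/*
 * Illustrative multiplex filter example (assumed values, not taken from
 * the original source): an RX_SETUP with nframes = 3 could carry
 *
 *	frames[0].data[0] = 0xff	MUX mask: compare the first data byte
 *	frames[1].data[0] = 0x01	mux value 1, other set bits = relevant content
 *	frames[2].data[0] = 0x02	mux value 2, other set bits = relevant content
 *
 * A received frame with data[0] = 0x02 matches index 2 in the loop above,
 * since (mask & rxdata) == (mask & frames[2]) on the first 64 data bits,
 * and bcm_rx_cmp_to_index() then checks that entry for content changes.
 */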
  589. /*
  590. * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
  591. */
  592. static struct bcm_op *bcm_find_op(struct list_head *ops,
  593. struct bcm_msg_head *mh, int ifindex)
  594. {
  595. struct bcm_op *op;
  596. list_for_each_entry(op, ops, list) {
  597. if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
  598. (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
  599. return op;
  600. }
  601. return NULL;
  602. }
  603. static void bcm_remove_op(struct bcm_op *op)
  604. {
  605. hrtimer_cancel(&op->timer);
  606. hrtimer_cancel(&op->thrtimer);
  607. if ((op->frames) && (op->frames != &op->sframe))
  608. kfree(op->frames);
  609. if ((op->last_frames) && (op->last_frames != &op->last_sframe))
  610. kfree(op->last_frames);
  611. kfree(op);
  612. }
  613. static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
  614. {
  615. if (op->rx_reg_dev == dev) {
  616. can_rx_unregister(dev_net(dev), dev, op->can_id,
  617. REGMASK(op->can_id), bcm_rx_handler, op);
  618. /* mark as removed subscription */
  619. op->rx_reg_dev = NULL;
  620. } else
  621. printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
  622. "mismatch %p %p\n", op->rx_reg_dev, dev);
  623. }
  624. /*
  625. * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
  626. */
  627. static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
  628. int ifindex)
  629. {
  630. struct bcm_op *op, *n;
  631. list_for_each_entry_safe(op, n, ops, list) {
  632. if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
  633. (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
  634. /*
  635. * Don't care if we're bound or not (due to netdev
  636. * problems); can_rx_unregister() is always a safe
  637. * thing to do here.
  638. */
  639. if (op->ifindex) {
  640. /*
  641. * Only remove subscriptions that had not
  642. * been removed due to NETDEV_UNREGISTER
  643. * in bcm_notifier()
  644. */
  645. if (op->rx_reg_dev) {
  646. struct net_device *dev;
  647. dev = dev_get_by_index(sock_net(op->sk),
  648. op->ifindex);
  649. if (dev) {
  650. bcm_rx_unreg(dev, op);
  651. dev_put(dev);
  652. }
  653. }
  654. } else
  655. can_rx_unregister(sock_net(op->sk), NULL,
  656. op->can_id,
  657. REGMASK(op->can_id),
  658. bcm_rx_handler, op);
  659. list_del(&op->list);
  660. synchronize_rcu();
  661. bcm_remove_op(op);
  662. return 1; /* done */
  663. }
  664. }
  665. return 0; /* not found */
  666. }
  667. /*
  668. * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
  669. */
  670. static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
  671. int ifindex)
  672. {
  673. struct bcm_op *op, *n;
  674. list_for_each_entry_safe(op, n, ops, list) {
  675. if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
  676. (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
  677. list_del(&op->list);
  678. bcm_remove_op(op);
  679. return 1; /* done */
  680. }
  681. }
  682. return 0; /* not found */
  683. }
  684. /*
  685. * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
  686. */
  687. static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
  688. int ifindex)
  689. {
  690. struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
  691. if (!op)
  692. return -EINVAL;
  693. /* put current values into msg_head */
  694. msg_head->flags = op->flags;
  695. msg_head->count = op->count;
  696. msg_head->ival1 = op->ival1;
  697. msg_head->ival2 = op->ival2;
  698. msg_head->nframes = op->nframes;
  699. bcm_send_to_user(op, msg_head, op->frames, 0);
  700. return MHSIZ;
  701. }
  702. /*
  703. * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
  704. */
  705. static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
  706. int ifindex, struct sock *sk)
  707. {
  708. struct bcm_sock *bo = bcm_sk(sk);
  709. struct bcm_op *op;
  710. struct canfd_frame *cf;
  711. unsigned int i;
  712. int err;
  713. /* we need a real device to send frames */
  714. if (!ifindex)
  715. return -ENODEV;
  716. /* check nframes boundaries - we need at least one CAN frame */
  717. if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
  718. return -EINVAL;
  719. /* check timeval limitations */
  720. if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
  721. return -EINVAL;
  722. /* check the given can_id */
  723. op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
  724. if (op) {
  725. /* update existing BCM operation */
  726. /*
  727. * Do we need more space for the CAN frames than currently
  728. * allocated? -> This is a _really_ unusual use-case and
  729. * therefore (complexity / locking) it is not supported.
  730. */
  731. if (msg_head->nframes > op->nframes)
  732. return -E2BIG;
  733. /* update CAN frames content */
  734. for (i = 0; i < msg_head->nframes; i++) {
  735. cf = op->frames + op->cfsiz * i;
  736. err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
  737. if (op->flags & CAN_FD_FRAME) {
  738. if (cf->len > 64)
  739. err = -EINVAL;
  740. } else {
  741. if (cf->len > 8)
  742. err = -EINVAL;
  743. }
  744. if (err < 0)
  745. return err;
  746. if (msg_head->flags & TX_CP_CAN_ID) {
  747. /* copy can_id into frame */
  748. cf->can_id = msg_head->can_id;
  749. }
  750. }
  751. op->flags = msg_head->flags;
  752. } else {
  753. /* insert new BCM operation for the given can_id */
  754. op = kzalloc(OPSIZ, GFP_KERNEL);
  755. if (!op)
  756. return -ENOMEM;
  757. op->can_id = msg_head->can_id;
  758. op->cfsiz = CFSIZ(msg_head->flags);
  759. op->flags = msg_head->flags;
  760. /* create array for CAN frames and copy the data */
  761. if (msg_head->nframes > 1) {
  762. op->frames = kmalloc_array(msg_head->nframes,
  763. op->cfsiz,
  764. GFP_KERNEL);
  765. if (!op->frames) {
  766. kfree(op);
  767. return -ENOMEM;
  768. }
  769. } else
  770. op->frames = &op->sframe;
  771. for (i = 0; i < msg_head->nframes; i++) {
  772. cf = op->frames + op->cfsiz * i;
  773. err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
  774. if (op->flags & CAN_FD_FRAME) {
  775. if (cf->len > 64)
  776. err = -EINVAL;
  777. } else {
  778. if (cf->len > 8)
  779. err = -EINVAL;
  780. }
  781. if (err < 0) {
  782. if (op->frames != &op->sframe)
  783. kfree(op->frames);
  784. kfree(op);
  785. return err;
  786. }
  787. if (msg_head->flags & TX_CP_CAN_ID) {
  788. /* copy can_id into frame */
  789. cf->can_id = msg_head->can_id;
  790. }
  791. }
  792. /* tx_ops never compare with previously received messages */
  793. op->last_frames = NULL;
  794. /* bcm_can_tx / bcm_tx_timeout_handler needs this */
  795. op->sk = sk;
  796. op->ifindex = ifindex;
  797. /* initialize uninitialized (kzalloc) structure */
  798. hrtimer_init(&op->timer, CLOCK_MONOTONIC,
  799. HRTIMER_MODE_REL_SOFT);
  800. op->timer.function = bcm_tx_timeout_handler;
  801. /* currently unused in tx_ops */
  802. hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
  803. HRTIMER_MODE_REL_SOFT);
  804. /* add this bcm_op to the list of the tx_ops */
  805. list_add(&op->list, &bo->tx_ops);
  806. } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
  807. if (op->nframes != msg_head->nframes) {
  808. op->nframes = msg_head->nframes;
  809. /* start multiple frame transmission with index 0 */
  810. op->currframe = 0;
  811. }
  812. /* check flags */
  813. if (op->flags & TX_RESET_MULTI_IDX) {
  814. /* start multiple frame transmission with index 0 */
  815. op->currframe = 0;
  816. }
  817. if (op->flags & SETTIMER) {
  818. /* set timer values */
  819. op->count = msg_head->count;
  820. op->ival1 = msg_head->ival1;
  821. op->ival2 = msg_head->ival2;
  822. op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
  823. op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
  824. /* disable an active timer due to zero values? */
  825. if (!op->kt_ival1 && !op->kt_ival2)
  826. hrtimer_cancel(&op->timer);
  827. }
  828. if (op->flags & STARTTIMER) {
  829. hrtimer_cancel(&op->timer);
  830. /* spec: send CAN frame when starting timer */
  831. op->flags |= TX_ANNOUNCE;
  832. }
  833. if (op->flags & TX_ANNOUNCE) {
  834. bcm_can_tx(op);
  835. if (op->count)
  836. op->count--;
  837. }
  838. if (op->flags & STARTTIMER)
  839. bcm_tx_start_timer(op);
  840. return msg_head->nframes * op->cfsiz + MHSIZ;
  841. }
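/*
 * Minimal userspace sketch of the TX_SETUP message layout consumed by
 * bcm_tx_setup() above. This is an illustration only; the socket "s",
 * the interface "can0", the CAN ID and the timing values are made-up
 * placeholders:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx;
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = { .can_family = AF_CAN,
 *				     .can_ifindex = if_nametoindex("can0") };
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.msg_head.opcode = TX_SETUP;
 *	tx.msg_head.flags  = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	tx.msg_head.can_id = 0x123;
 *	tx.msg_head.count  = 10;		// 10 frames spaced by ival1 ...
 *	tx.msg_head.ival1.tv_usec = 100000;	// ... then continue with ival2
 *	tx.msg_head.ival2.tv_sec  = 1;
 *	tx.msg_head.nframes = 1;
 *	tx.frame.can_dlc = 2;
 *	tx.frame.data[0] = 0xde;
 *	tx.frame.data[1] = 0xad;
 *	write(s, &tx, sizeof(tx));
 */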
  842. /*
  843. * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
  844. */
  845. static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
  846. int ifindex, struct sock *sk)
  847. {
  848. struct bcm_sock *bo = bcm_sk(sk);
  849. struct bcm_op *op;
  850. int do_rx_register;
  851. int err = 0;
  852. if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
  853. /* be robust against wrong usage ... */
  854. msg_head->flags |= RX_FILTER_ID;
  855. /* ignore trailing garbage */
  856. msg_head->nframes = 0;
  857. }
  858. /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
  859. if (msg_head->nframes > MAX_NFRAMES + 1)
  860. return -EINVAL;
  861. if ((msg_head->flags & RX_RTR_FRAME) &&
  862. ((msg_head->nframes != 1) ||
  863. (!(msg_head->can_id & CAN_RTR_FLAG))))
  864. return -EINVAL;
  865. /* check timeval limitations */
  866. if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
  867. return -EINVAL;
  868. /* check the given can_id */
  869. op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
  870. if (op) {
  871. /* update existing BCM operation */
  872. /*
  873. * Do we need more space for the CAN frames than currently
  874. * allocated? -> This is a _really_ unusual use-case and
  875. * therefore (complexity / locking) it is not supported.
  876. */
  877. if (msg_head->nframes > op->nframes)
  878. return -E2BIG;
  879. if (msg_head->nframes) {
  880. /* update CAN frames content */
  881. err = memcpy_from_msg(op->frames, msg,
  882. msg_head->nframes * op->cfsiz);
  883. if (err < 0)
  884. return err;
  885. /* clear last_frames to indicate 'nothing received' */
  886. memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
  887. }
  888. op->nframes = msg_head->nframes;
  889. op->flags = msg_head->flags;
  890. /* Only an update -> do not call can_rx_register() */
  891. do_rx_register = 0;
  892. } else {
  893. /* insert new BCM operation for the given can_id */
  894. op = kzalloc(OPSIZ, GFP_KERNEL);
  895. if (!op)
  896. return -ENOMEM;
  897. op->can_id = msg_head->can_id;
  898. op->nframes = msg_head->nframes;
  899. op->cfsiz = CFSIZ(msg_head->flags);
  900. op->flags = msg_head->flags;
  901. if (msg_head->nframes > 1) {
  902. /* create array for CAN frames and copy the data */
  903. op->frames = kmalloc_array(msg_head->nframes,
  904. op->cfsiz,
  905. GFP_KERNEL);
  906. if (!op->frames) {
  907. kfree(op);
  908. return -ENOMEM;
  909. }
  910. /* create and init array for received CAN frames */
  911. op->last_frames = kcalloc(msg_head->nframes,
  912. op->cfsiz,
  913. GFP_KERNEL);
  914. if (!op->last_frames) {
  915. kfree(op->frames);
  916. kfree(op);
  917. return -ENOMEM;
  918. }
  919. } else {
  920. op->frames = &op->sframe;
  921. op->last_frames = &op->last_sframe;
  922. }
  923. if (msg_head->nframes) {
  924. err = memcpy_from_msg(op->frames, msg,
  925. msg_head->nframes * op->cfsiz);
  926. if (err < 0) {
  927. if (op->frames != &op->sframe)
  928. kfree(op->frames);
  929. if (op->last_frames != &op->last_sframe)
  930. kfree(op->last_frames);
  931. kfree(op);
  932. return err;
  933. }
  934. }
  935. /* bcm_can_tx / bcm_tx_timeout_handler needs this */
  936. op->sk = sk;
  937. op->ifindex = ifindex;
  938. /* ifindex for timeout events w/o previous frame reception */
  939. op->rx_ifindex = ifindex;
  940. /* initialize uninitialized (kzalloc) structure */
  941. hrtimer_init(&op->timer, CLOCK_MONOTONIC,
  942. HRTIMER_MODE_REL_SOFT);
  943. op->timer.function = bcm_rx_timeout_handler;
  944. hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
  945. HRTIMER_MODE_REL_SOFT);
  946. op->thrtimer.function = bcm_rx_thr_handler;
  947. /* add this bcm_op to the list of the rx_ops */
  948. list_add(&op->list, &bo->rx_ops);
  949. /* call can_rx_register() */
  950. do_rx_register = 1;
  951. } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
  952. /* check flags */
  953. if (op->flags & RX_RTR_FRAME) {
  954. struct canfd_frame *frame0 = op->frames;
  955. /* no timers in RTR-mode */
  956. hrtimer_cancel(&op->thrtimer);
  957. hrtimer_cancel(&op->timer);
  958. /*
  959. * funny feature in RX(!)_SETUP only for RTR-mode:
  960. * copy can_id into frame BUT without RTR-flag to
  961. * prevent a full-load-loopback-test ... ;-]
  962. */
  963. if ((op->flags & TX_CP_CAN_ID) ||
  964. (frame0->can_id == op->can_id))
  965. frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
  966. } else {
  967. if (op->flags & SETTIMER) {
  968. /* set timer value */
  969. op->ival1 = msg_head->ival1;
  970. op->ival2 = msg_head->ival2;
  971. op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
  972. op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
  973. /* disable an active timer due to zero value? */
  974. if (!op->kt_ival1)
  975. hrtimer_cancel(&op->timer);
  976. /*
  977. * In any case cancel the throttle timer, flush
  978. * potentially blocked msgs and reset throttle handling
  979. */
  980. op->kt_lastmsg = 0;
  981. hrtimer_cancel(&op->thrtimer);
  982. bcm_rx_thr_flush(op);
  983. }
  984. if ((op->flags & STARTTIMER) && op->kt_ival1)
  985. hrtimer_start(&op->timer, op->kt_ival1,
  986. HRTIMER_MODE_REL_SOFT);
  987. }
  988. /* now we can register for can_ids, if we added a new bcm_op */
  989. if (do_rx_register) {
  990. if (ifindex) {
  991. struct net_device *dev;
  992. dev = dev_get_by_index(sock_net(sk), ifindex);
  993. if (dev) {
  994. err = can_rx_register(sock_net(sk), dev,
  995. op->can_id,
  996. REGMASK(op->can_id),
  997. bcm_rx_handler, op,
  998. "bcm", sk);
  999. op->rx_reg_dev = dev;
  1000. dev_put(dev);
  1001. }
  1002. } else
  1003. err = can_rx_register(sock_net(sk), NULL, op->can_id,
  1004. REGMASK(op->can_id),
  1005. bcm_rx_handler, op, "bcm", sk);
  1006. if (err) {
  1007. /* this bcm rx op is broken -> remove it */
  1008. list_del(&op->list);
  1009. bcm_remove_op(op);
  1010. return err;
  1011. }
  1012. }
  1013. return msg_head->nframes * op->cfsiz + MHSIZ;
  1014. }
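/*
 * Corresponding userspace sketch for RX_SETUP (again an illustration with
 * placeholder values, reusing a connected CAN_BCM socket "s" as in the
 * TX_SETUP sketch above): subscribe to CAN ID 0x123 with RX_FILTER_ID, so
 * every matching frame is delivered without content filtering, and request
 * an RX_TIMEOUT message when no frame arrives for one second:
 *
 *	struct bcm_msg_head rx;
 *
 *	memset(&rx, 0, sizeof(rx));
 *	rx.opcode = RX_SETUP;
 *	rx.flags  = RX_FILTER_ID | SETTIMER | STARTTIMER;
 *	rx.can_id = 0x123;
 *	rx.ival1.tv_sec = 1;	// timeout monitoring via op->kt_ival1
 *	rx.nframes = 0;
 *	write(s, &rx, sizeof(rx));
 *
 * The resulting RX_CHANGED / RX_TIMEOUT messages are read from the same
 * socket and start with a struct bcm_msg_head, followed by the CAN frame
 * for RX_CHANGED (see bcm_send_to_user() above).
 */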
  1015. /*
  1016. * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
  1017. */
  1018. static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
  1019. int cfsiz)
  1020. {
  1021. struct sk_buff *skb;
  1022. struct net_device *dev;
  1023. int err;
  1024. /* we need a real device to send frames */
  1025. if (!ifindex)
  1026. return -ENODEV;
  1027. skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
  1028. if (!skb)
  1029. return -ENOMEM;
  1030. can_skb_reserve(skb);
  1031. err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
  1032. if (err < 0) {
  1033. kfree_skb(skb);
  1034. return err;
  1035. }
  1036. dev = dev_get_by_index(sock_net(sk), ifindex);
  1037. if (!dev) {
  1038. kfree_skb(skb);
  1039. return -ENODEV;
  1040. }
  1041. can_skb_prv(skb)->ifindex = dev->ifindex;
  1042. can_skb_prv(skb)->skbcnt = 0;
  1043. skb->dev = dev;
  1044. can_skb_set_owner(skb, sk);
  1045. err = can_send(skb, 1); /* send with loopback */
  1046. dev_put(dev);
  1047. if (err)
  1048. return err;
  1049. return cfsiz + MHSIZ;
  1050. }
  1051. /*
  1052. * bcm_sendmsg - process BCM commands (opcodes) from the userspace
  1053. */
  1054. static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
  1055. {
  1056. struct sock *sk = sock->sk;
  1057. struct bcm_sock *bo = bcm_sk(sk);
  1058. int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
  1059. struct bcm_msg_head msg_head;
  1060. int cfsiz;
  1061. int ret; /* read bytes or error codes as return value */
  1062. if (!bo->bound)
  1063. return -ENOTCONN;
  1064. /* check for valid message length from userspace */
  1065. if (size < MHSIZ)
  1066. return -EINVAL;
  1067. /* read message head information */
  1068. ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
  1069. if (ret < 0)
  1070. return ret;
  1071. cfsiz = CFSIZ(msg_head.flags);
  1072. if ((size - MHSIZ) % cfsiz)
  1073. return -EINVAL;
  1074. /* check for alternative ifindex for this bcm_op */
  1075. if (!ifindex && msg->msg_name) {
  1076. /* no bound device as default => check msg_name */
  1077. DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
  1078. if (msg->msg_namelen < BCM_MIN_NAMELEN)
  1079. return -EINVAL;
  1080. if (addr->can_family != AF_CAN)
  1081. return -EINVAL;
  1082. /* ifindex from sendto() */
  1083. ifindex = addr->can_ifindex;
  1084. if (ifindex) {
  1085. struct net_device *dev;
  1086. dev = dev_get_by_index(sock_net(sk), ifindex);
  1087. if (!dev)
  1088. return -ENODEV;
  1089. if (dev->type != ARPHRD_CAN) {
  1090. dev_put(dev);
  1091. return -ENODEV;
  1092. }
  1093. dev_put(dev);
  1094. }
  1095. }
  1096. lock_sock(sk);
  1097. switch (msg_head.opcode) {
  1098. case TX_SETUP:
  1099. ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
  1100. break;
  1101. case RX_SETUP:
  1102. ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
  1103. break;
  1104. case TX_DELETE:
  1105. if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
  1106. ret = MHSIZ;
  1107. else
  1108. ret = -EINVAL;
  1109. break;
  1110. case RX_DELETE:
  1111. if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
  1112. ret = MHSIZ;
  1113. else
  1114. ret = -EINVAL;
  1115. break;
  1116. case TX_READ:
  1117. /* reuse msg_head for the reply to TX_READ */
  1118. msg_head.opcode = TX_STATUS;
  1119. ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
  1120. break;
  1121. case RX_READ:
  1122. /* reuse msg_head for the reply to RX_READ */
  1123. msg_head.opcode = RX_STATUS;
  1124. ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
  1125. break;
  1126. case TX_SEND:
  1127. /* we need exactly one CAN frame behind the msg head */
  1128. if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
  1129. ret = -EINVAL;
  1130. else
  1131. ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
  1132. break;
  1133. default:
  1134. ret = -EINVAL;
  1135. break;
  1136. }
  1137. release_sock(sk);
  1138. return ret;
  1139. }
  1140. /*
  1141. * notification handler for netdevice status changes
  1142. */
  1143. static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
  1144. struct net_device *dev)
  1145. {
  1146. struct sock *sk = &bo->sk;
  1147. struct bcm_op *op;
  1148. int notify_enodev = 0;
  1149. if (!net_eq(dev_net(dev), sock_net(sk)))
  1150. return;
  1151. switch (msg) {
  1152. case NETDEV_UNREGISTER:
  1153. lock_sock(sk);
  1154. /* remove device specific receive entries */
  1155. list_for_each_entry(op, &bo->rx_ops, list)
  1156. if (op->rx_reg_dev == dev)
  1157. bcm_rx_unreg(dev, op);
  1158. /* remove device reference, if this is our bound device */
  1159. if (bo->bound && bo->ifindex == dev->ifindex) {
  1160. bo->bound = 0;
  1161. bo->ifindex = 0;
  1162. notify_enodev = 1;
  1163. }
  1164. release_sock(sk);
  1165. if (notify_enodev) {
  1166. sk->sk_err = ENODEV;
  1167. if (!sock_flag(sk, SOCK_DEAD))
  1168. sk->sk_error_report(sk);
  1169. }
  1170. break;
  1171. case NETDEV_DOWN:
  1172. if (bo->bound && bo->ifindex == dev->ifindex) {
  1173. sk->sk_err = ENETDOWN;
  1174. if (!sock_flag(sk, SOCK_DEAD))
  1175. sk->sk_error_report(sk);
  1176. }
  1177. }
  1178. }
  1179. static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
  1180. void *ptr)
  1181. {
  1182. struct net_device *dev = netdev_notifier_info_to_dev(ptr);
  1183. if (dev->type != ARPHRD_CAN)
  1184. return NOTIFY_DONE;
  1185. if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
  1186. return NOTIFY_DONE;
  1187. if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
  1188. return NOTIFY_DONE;
  1189. spin_lock(&bcm_notifier_lock);
  1190. list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
  1191. spin_unlock(&bcm_notifier_lock);
  1192. bcm_notify(bcm_busy_notifier, msg, dev);
  1193. spin_lock(&bcm_notifier_lock);
  1194. }
  1195. bcm_busy_notifier = NULL;
  1196. spin_unlock(&bcm_notifier_lock);
  1197. return NOTIFY_DONE;
  1198. }
  1199. /*
  1200. * initial settings for all BCM sockets to be set at socket creation time
  1201. */
  1202. static int bcm_init(struct sock *sk)
  1203. {
  1204. struct bcm_sock *bo = bcm_sk(sk);
  1205. bo->bound = 0;
  1206. bo->ifindex = 0;
  1207. bo->dropped_usr_msgs = 0;
  1208. bo->bcm_proc_read = NULL;
  1209. INIT_LIST_HEAD(&bo->tx_ops);
  1210. INIT_LIST_HEAD(&bo->rx_ops);
  1211. /* set notifier */
  1212. spin_lock(&bcm_notifier_lock);
  1213. list_add_tail(&bo->notifier, &bcm_notifier_list);
  1214. spin_unlock(&bcm_notifier_lock);
  1215. return 0;
  1216. }
  1217. /*
  1218. * standard socket functions
  1219. */
  1220. static int bcm_release(struct socket *sock)
  1221. {
  1222. struct sock *sk = sock->sk;
  1223. struct net *net;
  1224. struct bcm_sock *bo;
  1225. struct bcm_op *op, *next;
  1226. if (!sk)
  1227. return 0;
  1228. net = sock_net(sk);
  1229. bo = bcm_sk(sk);
  1230. /* remove bcm_ops, timer, rx_unregister(), etc. */
  1231. spin_lock(&bcm_notifier_lock);
  1232. while (bcm_busy_notifier == bo) {
  1233. spin_unlock(&bcm_notifier_lock);
  1234. schedule_timeout_uninterruptible(1);
  1235. spin_lock(&bcm_notifier_lock);
  1236. }
  1237. list_del(&bo->notifier);
  1238. spin_unlock(&bcm_notifier_lock);
  1239. lock_sock(sk);
  1240. list_for_each_entry_safe(op, next, &bo->tx_ops, list)
  1241. bcm_remove_op(op);
  1242. list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
  1243. /*
  1244. * Don't care if we're bound or not (due to netdev problems);
  1245. * can_rx_unregister() is always a safe thing to do here.
  1246. */
  1247. if (op->ifindex) {
  1248. /*
  1249. * Only remove subscriptions that had not
  1250. * been removed due to NETDEV_UNREGISTER
  1251. * in bcm_notifier()
  1252. */
  1253. if (op->rx_reg_dev) {
  1254. struct net_device *dev;
  1255. dev = dev_get_by_index(net, op->ifindex);
  1256. if (dev) {
  1257. bcm_rx_unreg(dev, op);
  1258. dev_put(dev);
  1259. }
  1260. }
  1261. } else
  1262. can_rx_unregister(net, NULL, op->can_id,
  1263. REGMASK(op->can_id),
  1264. bcm_rx_handler, op);
  1265. }
  1266. synchronize_rcu();
  1267. list_for_each_entry_safe(op, next, &bo->rx_ops, list)
  1268. bcm_remove_op(op);
  1269. #if IS_ENABLED(CONFIG_PROC_FS)
  1270. /* remove procfs entry */
  1271. if (net->can.bcmproc_dir && bo->bcm_proc_read)
  1272. remove_proc_entry(bo->procname, net->can.bcmproc_dir);
  1273. #endif /* CONFIG_PROC_FS */
  1274. /* remove device reference */
  1275. if (bo->bound) {
  1276. bo->bound = 0;
  1277. bo->ifindex = 0;
  1278. }
  1279. sock_orphan(sk);
  1280. sock->sk = NULL;
  1281. release_sock(sk);
  1282. sock_put(sk);
  1283. return 0;
  1284. }
  1285. static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
  1286. int flags)
  1287. {
  1288. struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
  1289. struct sock *sk = sock->sk;
  1290. struct bcm_sock *bo = bcm_sk(sk);
  1291. struct net *net = sock_net(sk);
  1292. int ret = 0;
  1293. if (len < BCM_MIN_NAMELEN)
  1294. return -EINVAL;
  1295. lock_sock(sk);
  1296. if (bo->bound) {
  1297. ret = -EISCONN;
  1298. goto fail;
  1299. }
  1300. /* bind a device to this socket */
  1301. if (addr->can_ifindex) {
  1302. struct net_device *dev;
  1303. dev = dev_get_by_index(net, addr->can_ifindex);
  1304. if (!dev) {
  1305. ret = -ENODEV;
  1306. goto fail;
  1307. }
  1308. if (dev->type != ARPHRD_CAN) {
  1309. dev_put(dev);
  1310. ret = -ENODEV;
  1311. goto fail;
  1312. }
  1313. bo->ifindex = dev->ifindex;
  1314. dev_put(dev);
  1315. } else {
  1316. /* no interface reference for ifindex = 0 ('any' CAN device) */
  1317. bo->ifindex = 0;
  1318. }
  1319. #if IS_ENABLED(CONFIG_PROC_FS)
  1320. if (net->can.bcmproc_dir) {
  1321. /* unique socket address as filename */
  1322. sprintf(bo->procname, "%lu", sock_i_ino(sk));
  1323. bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
  1324. net->can.bcmproc_dir,
  1325. bcm_proc_show, sk);
  1326. if (!bo->bcm_proc_read) {
  1327. ret = -ENOMEM;
  1328. goto fail;
  1329. }
  1330. }
  1331. #endif /* CONFIG_PROC_FS */
  1332. bo->bound = 1;
  1333. fail:
  1334. release_sock(sk);
  1335. return ret;
  1336. }
  1337. static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
  1338. int flags)
  1339. {
  1340. struct sock *sk = sock->sk;
  1341. struct sk_buff *skb;
  1342. int error = 0;
  1343. int noblock;
  1344. int err;
  1345. noblock = flags & MSG_DONTWAIT;
  1346. flags &= ~MSG_DONTWAIT;
  1347. skb = skb_recv_datagram(sk, flags, noblock, &error);
  1348. if (!skb)
  1349. return error;
  1350. if (skb->len < size)
  1351. size = skb->len;
  1352. err = memcpy_to_msg(msg, skb->data, size);
  1353. if (err < 0) {
  1354. skb_free_datagram(sk, skb);
  1355. return err;
  1356. }
  1357. sock_recv_ts_and_drops(msg, sk, skb);
  1358. if (msg->msg_name) {
  1359. __sockaddr_check_size(BCM_MIN_NAMELEN);
  1360. msg->msg_namelen = BCM_MIN_NAMELEN;
  1361. memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
  1362. }
  1363. skb_free_datagram(sk, skb);
  1364. return size;
  1365. }
  1366. static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
  1367. unsigned long arg)
  1368. {
  1369. /* no ioctls for socket layer -> hand it down to NIC layer */
  1370. return -ENOIOCTLCMD;
  1371. }
  1372. static const struct proto_ops bcm_ops = {
  1373. .family = PF_CAN,
  1374. .release = bcm_release,
  1375. .bind = sock_no_bind,
  1376. .connect = bcm_connect,
  1377. .socketpair = sock_no_socketpair,
  1378. .accept = sock_no_accept,
  1379. .getname = sock_no_getname,
  1380. .poll = datagram_poll,
  1381. .ioctl = bcm_sock_no_ioctlcmd,
  1382. .gettstamp = sock_gettstamp,
  1383. .listen = sock_no_listen,
  1384. .shutdown = sock_no_shutdown,
  1385. .sendmsg = bcm_sendmsg,
  1386. .recvmsg = bcm_recvmsg,
  1387. .mmap = sock_no_mmap,
  1388. .sendpage = sock_no_sendpage,
  1389. };
  1390. static struct proto bcm_proto __read_mostly = {
  1391. .name = "CAN_BCM",
  1392. .owner = THIS_MODULE,
  1393. .obj_size = sizeof(struct bcm_sock),
  1394. .init = bcm_init,
  1395. };
  1396. static const struct can_proto bcm_can_proto = {
  1397. .type = SOCK_DGRAM,
  1398. .protocol = CAN_BCM,
  1399. .ops = &bcm_ops,
  1400. .prot = &bcm_proto,
  1401. };
  1402. static int canbcm_pernet_init(struct net *net)
  1403. {
  1404. #if IS_ENABLED(CONFIG_PROC_FS)
  1405. /* create /proc/net/can-bcm directory */
  1406. net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
  1407. #endif /* CONFIG_PROC_FS */
  1408. return 0;
  1409. }
  1410. static void canbcm_pernet_exit(struct net *net)
  1411. {
  1412. #if IS_ENABLED(CONFIG_PROC_FS)
  1413. /* remove /proc/net/can-bcm directory */
  1414. if (net->can.bcmproc_dir)
  1415. remove_proc_entry("can-bcm", net->proc_net);
  1416. #endif /* CONFIG_PROC_FS */
  1417. }
  1418. static struct pernet_operations canbcm_pernet_ops __read_mostly = {
  1419. .init = canbcm_pernet_init,
  1420. .exit = canbcm_pernet_exit,
  1421. };
  1422. static struct notifier_block canbcm_notifier = {
  1423. .notifier_call = bcm_notifier
  1424. };
  1425. static int __init bcm_module_init(void)
  1426. {
  1427. int err;
  1428. pr_info("can: broadcast manager protocol\n");
  1429. err = can_proto_register(&bcm_can_proto);
  1430. if (err < 0) {
  1431. printk(KERN_ERR "can: registration of bcm protocol failed\n");
  1432. return err;
  1433. }
  1434. register_pernet_subsys(&canbcm_pernet_ops);
  1435. register_netdevice_notifier(&canbcm_notifier);
  1436. return 0;
  1437. }
  1438. static void __exit bcm_module_exit(void)
  1439. {
  1440. can_proto_unregister(&bcm_can_proto);
  1441. unregister_netdevice_notifier(&canbcm_notifier);
  1442. unregister_pernet_subsys(&canbcm_pernet_ops);
  1443. }
  1444. module_init(bcm_module_init);
  1445. module_exit(bcm_module_exit);