caif-hsi: Replace platform device with ops structure.
drivers/net/caif/caif_hsi.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  * Copyright (C) ST-Ericsson AB 2010
3  * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4  * Author:  Daniel Martensson / daniel.martensson@stericsson.com
5  *          Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
6  * License terms: GNU General Public License (GPL) version 2.
7  */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME fmt
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/netdevice.h>
15 #include <linux/string.h>
16 #include <linux/list.h>
17 #include <linux/interrupt.h>
18 #include <linux/delay.h>
19 #include <linux/sched.h>
20 #include <linux/if_arp.h>
21 #include <linux/timer.h>
22 #include <net/rtnetlink.h>
23 #include <linux/pkt_sched.h>
24 #include <net/caif/caif_layer.h>
25 #include <net/caif/caif_hsi.h>
26
27 MODULE_LICENSE("GPL");
28 MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
29 MODULE_DESCRIPTION("CAIF HSI driver");
30
31 /* Returns the number of padding bytes for alignment. */
32 #define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
33                                 (((pow)-((x)&((pow)-1)))))
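/*
 * Editorial worked example (not part of the driver): with the default
 * 4-byte alignment, PAD_POW2(5, 4) == 4 - (5 & 3) == 3 padding bytes,
 * while PAD_POW2(8, 4) == 0 because 8 is already aligned. The second
 * argument must be a power of two for the mask arithmetic to hold.
 */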
34
35 static int inactivity_timeout = 1000;
36 module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
38
39 static int aggregation_timeout = 1;
40 module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
41 MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
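/*
 * Editorial usage sketch (the module name caif_hsi is an assumption, it is
 * not spelled out in this file): both timeouts are writable module
 * parameters, so they can be set at load time or at runtime, e.g.
 *
 *   modprobe caif_hsi inactivity_timeout=500 aggregation_timeout=2
 *   echo 2000 > /sys/module/caif_hsi/parameters/inactivity_timeout
 */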
42
43 /*
44  * HSI padding options.
45  * Warning: must be a power of 2 (a bitwise & is used) and cannot be zero!
46  */
47 static int hsi_head_align = 4;
48 module_param(hsi_head_align, int, S_IRUGO);
49 MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
50
51 static int hsi_tail_align = 4;
52 module_param(hsi_tail_align, int, S_IRUGO);
53 MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
54
55 /*
56  * HSI link layer flowcontrol thresholds.
57  * Warning: A high threshold value might increase throughput, but it will at
58  * the same time prevent channel prioritization and increase the risk of
59  * flooding the modem. The high threshold should be above the low.
60  */
61 static int hsi_high_threshold = 100;
62 module_param(hsi_high_threshold, int, S_IRUGO);
63 MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
64
65 static int hsi_low_threshold = 50;
66 module_param(hsi_low_threshold, int, S_IRUGO);
67 MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");
68
69 #define ON 1
70 #define OFF 0
71
72 /*
73  * Threshold values for the HSI packet queue. Flowcontrol will be asserted
74  * when the number of packets exceeds HIGH_WATER_MARK. It will not be
75  * de-asserted before the number of packets drops below LOW_WATER_MARK.
76  */
77 #define LOW_WATER_MARK   hsi_low_threshold
78 #define HIGH_WATER_MARK  hsi_high_threshold
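/*
 * Editorial note: with the default thresholds (high = 100, low = 50) flow
 * control toggles with hysteresis -- flow off is signalled in cfhsi_xmit()
 * once more than 100 packets are queued, and flow on is not signalled again
 * in cfhsi_tx_done() until the queues drain to 50 packets or fewer.
 */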
79
80 static LIST_HEAD(cfhsi_list);
81
82 static void cfhsi_inactivity_tout(unsigned long arg)
83 {
84         struct cfhsi *cfhsi = (struct cfhsi *)arg;
85
86         netdev_dbg(cfhsi->ndev, "%s.\n",
87                 __func__);
88
89         /* Schedule power down work queue. */
90         if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
91                 queue_work(cfhsi->wq, &cfhsi->wake_down_work);
92 }
93
94 static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
95                                            const struct sk_buff *skb,
96                                            int direction)
97 {
98         struct caif_payload_info *info;
99         int hpad, tpad, len;
100
101         info = (struct caif_payload_info *)&skb->cb;
102         hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
103         tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
104         len = skb->len + hpad + tpad;
105
106         if (direction > 0)
107                 cfhsi->aggregation_len += len;
108         else if (direction < 0)
109                 cfhsi->aggregation_len -= len;
110 }
111
112 static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
113 {
114         int i;
115
116         if (cfhsi->aggregation_timeout == 0)
117                 return true;
118
119         for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
120                 if (cfhsi->qhead[i].qlen)
121                         return true;
122         }
123
124         /* TODO: Use aggregation_len instead */
125         if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
126                 return true;
127
128         return false;
129 }
130
131 static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
132 {
133         struct sk_buff *skb;
134         int i;
135
136         for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
137                 skb = skb_dequeue(&cfhsi->qhead[i]);
138                 if (skb)
139                         break;
140         }
141
142         return skb;
143 }
144
145 static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
146 {
147         int i, len = 0;
148         for (i = 0; i < CFHSI_PRIO_LAST; ++i)
149                 len += skb_queue_len(&cfhsi->qhead[i]);
150         return len;
151 }
152
153 static void cfhsi_abort_tx(struct cfhsi *cfhsi)
154 {
155         struct sk_buff *skb;
156
157         for (;;) {
158                 spin_lock_bh(&cfhsi->lock);
159                 skb = cfhsi_dequeue(cfhsi);
160                 if (!skb)
161                         break;
162
163                 cfhsi->ndev->stats.tx_errors++;
164                 cfhsi->ndev->stats.tx_dropped++;
165                 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
166                 spin_unlock_bh(&cfhsi->lock);
167                 kfree_skb(skb);
168         }
169         cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
170         if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
171                 mod_timer(&cfhsi->inactivity_timer,
172                         jiffies + cfhsi->inactivity_timeout);
173         spin_unlock_bh(&cfhsi->lock);
174 }
175
176 static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
177 {
178         char buffer[32]; /* Any reasonable value */
179         size_t fifo_occupancy;
180         int ret;
181
182         netdev_dbg(cfhsi->ndev, "%s.\n",
183                 __func__);
184
185         do {
186                 ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
187                                 &fifo_occupancy);
188                 if (ret) {
189                         netdev_warn(cfhsi->ndev,
190                                 "%s: can't get FIFO occupancy: %d.\n",
191                                 __func__, ret);
192                         break;
193                 } else if (!fifo_occupancy)
194                         /* No more data, exiting normally */
195                         break;
196
197                 fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
198                 set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
199                 ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
200                                 cfhsi->ops);
201                 if (ret) {
202                         clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
203                         netdev_warn(cfhsi->ndev,
204                                 "%s: can't read data: %d.\n",
205                                 __func__, ret);
206                         break;
207                 }
208
209                 ret = 5 * HZ;
210                 ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
211                          !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
212
213                 if (ret < 0) {
214                         netdev_warn(cfhsi->ndev,
215                                 "%s: can't wait for flush complete: %d.\n",
216                                 __func__, ret);
217                         break;
218                 } else if (!ret) {
219                         ret = -ETIMEDOUT;
220                         netdev_warn(cfhsi->ndev,
221                                 "%s: timeout waiting for flush complete.\n",
222                                 __func__);
223                         break;
224                 }
225         } while (1);
226
227         return ret;
228 }
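/*
 * Editorial sketch of the flush handshake above: CFHSI_FLUSH_FIFO is set
 * before each dummy read; cfhsi_rx_done_cb() then clears the bit and wakes
 * flush_fifo_wait instead of running the normal RX path, and the loop
 * repeats until cfhsi_fifo_occupancy() reports an empty FIFO or the 5 s
 * timeout expires.
 */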
229
230 static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
231 {
232         int nfrms = 0;
233         int pld_len = 0;
234         struct sk_buff *skb;
235         u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
236
237         skb = cfhsi_dequeue(cfhsi);
238         if (!skb)
239                 return 0;
240
241         /* Clear offset. */
242         desc->offset = 0;
243
244         /* Check if we can embed a CAIF frame. */
245         if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
246                 struct caif_payload_info *info;
247                 int hpad;
248                 int tpad;
249
250                 /* Calculate needed head alignment and tail alignment. */
251                 info = (struct caif_payload_info *)&skb->cb;
252
253                 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
254                 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
255
256                 /* Check if frame still fits with added alignment. */
257                 if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
258                         u8 *pemb = desc->emb_frm;
259                         desc->offset = CFHSI_DESC_SHORT_SZ;
260                         *pemb = (u8)(hpad - 1);
261                         pemb += hpad;
262
263                         /* Update network statistics. */
264                         spin_lock_bh(&cfhsi->lock);
265                         cfhsi->ndev->stats.tx_packets++;
266                         cfhsi->ndev->stats.tx_bytes += skb->len;
267                         cfhsi_update_aggregation_stats(cfhsi, skb, -1);
268                         spin_unlock_bh(&cfhsi->lock);
269
270                         /* Copy in embedded CAIF frame. */
271                         skb_copy_bits(skb, 0, pemb, skb->len);
272
273                         /* Consume the SKB */
274                         consume_skb(skb);
275                         skb = NULL;
276                 }
277         }
278
279         /* Create payload CAIF frames. */
280         pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
281         while (nfrms < CFHSI_MAX_PKTS) {
282                 struct caif_payload_info *info;
283                 int hpad;
284                 int tpad;
285
286                 if (!skb)
287                         skb = cfhsi_dequeue(cfhsi);
288
289                 if (!skb)
290                         break;
291
292                 /* Calculate needed head alignment and tail alignment. */
293                 info = (struct caif_payload_info *)&skb->cb;
294
295                 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
296                 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
297
298                 /* Fill in CAIF frame length in descriptor. */
299                 desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
300
301                 /* Fill head padding information. */
302                 *pfrm = (u8)(hpad - 1);
303                 pfrm += hpad;
304
305                 /* Update network statistics. */
306                 spin_lock_bh(&cfhsi->lock);
307                 cfhsi->ndev->stats.tx_packets++;
308                 cfhsi->ndev->stats.tx_bytes += skb->len;
309                 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
310                 spin_unlock_bh(&cfhsi->lock);
311
312                 /* Copy in CAIF frame. */
313                 skb_copy_bits(skb, 0, pfrm, skb->len);
314
315                 /* Update payload length. */
316                 pld_len += desc->cffrm_len[nfrms];
317
318                 /* Update frame pointer. */
319                 pfrm += skb->len + tpad;
320
321                 /* Consume the SKB */
322                 consume_skb(skb);
323                 skb = NULL;
324
325                 /* Update number of frames. */
326                 nfrms++;
327         }
328
329         /* Unused length fields should be zero-filled (according to the spec). */
330         while (nfrms < CFHSI_MAX_PKTS) {
331                 desc->cffrm_len[nfrms] = 0x0000;
332                 nfrms++;
333         }
334
335         /* Check if we can piggy-back another descriptor. */
336         if (cfhsi_can_send_aggregate(cfhsi))
337                 desc->header |= CFHSI_PIGGY_DESC;
338         else
339                 desc->header &= ~CFHSI_PIGGY_DESC;
340
341         return CFHSI_DESC_SZ + pld_len;
342 }
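/*
 * Editorial illustration of the frame assembled by cfhsi_tx_frm() (layout
 * inferred from the code above; byte counts depend on the alignment
 * module parameters):
 *
 *   +--------------------+  descriptor: header, offset, cffrm_len[0..n]
 *   | descriptor         |
 *   +--------------------+  emb_frm: optional embedded CAIF frame; its
 *   | emb pad | frame    |  first byte stores (hpad - 1)
 *   +--------------------+  CFHSI_MAX_EMB_FRM_SZ boundary
 *   | pad | frame | pad  |  up to CFHSI_MAX_PKTS payload CAIF frames,
 *   +--------------------+  each occupying hpad + skb->len + tpad bytes
 */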
343
344 static void cfhsi_start_tx(struct cfhsi *cfhsi)
345 {
346         struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
347         int len, res;
348
349         netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
350
351         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
352                 return;
353
354         do {
355                 /* Create HSI frame. */
356                 len = cfhsi_tx_frm(desc, cfhsi);
357                 if (!len) {
358                         spin_lock_bh(&cfhsi->lock);
359                         if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
360                                 spin_unlock_bh(&cfhsi->lock);
361                                 res = -EAGAIN;
362                                 continue;
363                         }
364                         cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
365                         /* Start inactivity timer. */
366                         mod_timer(&cfhsi->inactivity_timer,
367                                 jiffies + cfhsi->inactivity_timeout);
368                         spin_unlock_bh(&cfhsi->lock);
369                         break;
370                 }
371
372                 /* Set up new transfer. */
373                 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
374                 if (WARN_ON(res < 0))
375                         netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
376                                 __func__, res);
377         } while (res < 0);
378 }
379
380 static void cfhsi_tx_done(struct cfhsi *cfhsi)
381 {
382         netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
383
384         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
385                 return;
386
387         /*
388          * Send flow on if flow off has been previously signalled
389          * and number of packets is below low water mark.
390          */
391         spin_lock_bh(&cfhsi->lock);
392         if (cfhsi->flow_off_sent &&
393                         cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
394                         cfhsi->cfdev.flowctrl) {
395
396                 cfhsi->flow_off_sent = 0;
397                 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
398         }
399
400         if (cfhsi_can_send_aggregate(cfhsi)) {
401                 spin_unlock_bh(&cfhsi->lock);
402                 cfhsi_start_tx(cfhsi);
403         } else {
404                 mod_timer(&cfhsi->aggregation_timer,
405                         jiffies + cfhsi->aggregation_timeout);
406                 spin_unlock_bh(&cfhsi->lock);
407         }
408
409         return;
410 }
411
412 static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
413 {
414         struct cfhsi *cfhsi;
415
416         cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
417         netdev_dbg(cfhsi->ndev, "%s.\n",
418                 __func__);
419
420         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
421                 return;
422         cfhsi_tx_done(cfhsi);
423 }
424
425 static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
426 {
427         int xfer_sz = 0;
428         int nfrms = 0;
429         u16 *plen = NULL;
430         u8 *pfrm = NULL;
431
432         if ((desc->header & ~CFHSI_PIGGY_DESC) ||
433                         (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
434                 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
435                         __func__);
436                 return -EPROTO;
437         }
438
439         /* Check for embedded CAIF frame. */
440         if (desc->offset) {
441                 struct sk_buff *skb;
442                 u8 *dst = NULL;
443                 int len = 0;
444                 pfrm = ((u8 *)desc) + desc->offset;
445
446                 /* Remove offset padding. */
447                 pfrm += *pfrm + 1;
448
449                 /* Read length of CAIF frame (little endian). */
450                 len = *pfrm;
451                 len |= ((*(pfrm+1)) << 8) & 0xFF00;
452                 len += 2;       /* Add FCS fields. */
453
454                 /* Sanity check length of CAIF frame. */
455                 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
456                         netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
457                                 __func__);
458                         return -EPROTO;
459                 }
460
461                 /* Allocate SKB (OK even in IRQ context). */
462                 skb = alloc_skb(len + 1, GFP_ATOMIC);
463                 if (!skb) {
464                         netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
465                                 __func__);
466                         return -ENOMEM;
467                 }
468                 caif_assert(skb != NULL);
469
470                 dst = skb_put(skb, len);
471                 memcpy(dst, pfrm, len);
472
473                 skb->protocol = htons(ETH_P_CAIF);
474                 skb_reset_mac_header(skb);
475                 skb->dev = cfhsi->ndev;
476
477                 /*
478                  * We are in a callback handler and
479                  * unfortunately we don't know what context we're
480                  * running in.
481                  */
482                 if (in_interrupt())
483                         netif_rx(skb);
484                 else
485                         netif_rx_ni(skb);
486
487                 /* Update network statistics. */
488                 cfhsi->ndev->stats.rx_packets++;
489                 cfhsi->ndev->stats.rx_bytes += len;
490         }
491
492         /* Calculate transfer length. */
493         plen = desc->cffrm_len;
494         while (nfrms < CFHSI_MAX_PKTS && *plen) {
495                 xfer_sz += *plen;
496                 plen++;
497                 nfrms++;
498         }
499
500         /* Check for piggy-backed descriptor. */
501         if (desc->header & CFHSI_PIGGY_DESC)
502                 xfer_sz += CFHSI_DESC_SZ;
503
504         if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
505                 netdev_err(cfhsi->ndev,
506                                 "%s: Invalid payload len: %d, ignored.\n",
507                         __func__, xfer_sz);
508                 return -EPROTO;
509         }
510         return xfer_sz;
511 }
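/*
 * Editorial example of the length decode above: a CAIF frame whose first
 * two bytes (after head padding) are 0x34 0x12 carries a little-endian
 * length of 0x1234, so the driver copies 0x1234 + 2 bytes -- the payload
 * plus the two FCS bytes.
 */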
512
513 static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
514 {
515         int xfer_sz = 0;
516         int nfrms = 0;
517         u16 *plen;
518
519         if ((desc->header & ~CFHSI_PIGGY_DESC) ||
520                         (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
521
522                 pr_err("Invalid descriptor. %x %x\n", desc->header,
523                                 desc->offset);
524                 return -EPROTO;
525         }
526
527         /* Calculate transfer length. */
528         plen = desc->cffrm_len;
529         while (nfrms < CFHSI_MAX_PKTS && *plen) {
530                 xfer_sz += *plen;
531                 plen++;
532                 nfrms++;
533         }
534
535         if (xfer_sz % 4) {
536                 pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
537                 return -EPROTO;
538         }
539         return xfer_sz;
540 }
541
542 static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
543 {
544         int rx_sz = 0;
545         int nfrms = 0;
546         u16 *plen = NULL;
547         u8 *pfrm = NULL;
548
549         /* Sanity check header and offset. */
550         if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
551                         (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
552                 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
553                         __func__);
554                 return -EPROTO;
555         }
556
557         /* Set frame pointer to start of payload. */
558         pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
559         plen = desc->cffrm_len;
560
561         /* Skip already processed frames. */
562         while (nfrms < cfhsi->rx_state.nfrms) {
563                 pfrm += *plen;
564                 rx_sz += *plen;
565                 plen++;
566                 nfrms++;
567         }
568
569         /* Parse payload. */
570         while (nfrms < CFHSI_MAX_PKTS && *plen) {
571                 struct sk_buff *skb;
572                 u8 *dst = NULL;
573                 u8 *pcffrm = NULL;
574                 int len;
575
576                 /* CAIF frame starts after head padding. */
577                 pcffrm = pfrm + *pfrm + 1;
578
579                 /* Read length of CAIF frame (little endian). */
580                 len = *pcffrm;
581                 len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
582                 len += 2;       /* Add FCS fields. */
583
584                 /* Sanity check length of CAIF frames. */
585                 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
586                         netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
587                                 __func__);
588                         return -EPROTO;
589                 }
590
591                 /* Allocate SKB (OK even in IRQ context). */
592                 skb = alloc_skb(len + 1, GFP_ATOMIC);
593                 if (!skb) {
594                         netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
595                                 __func__);
596                         cfhsi->rx_state.nfrms = nfrms;
597                         return -ENOMEM;
598                 }
599                 caif_assert(skb != NULL);
600
601                 dst = skb_put(skb, len);
602                 memcpy(dst, pcffrm, len);
603
604                 skb->protocol = htons(ETH_P_CAIF);
605                 skb_reset_mac_header(skb);
606                 skb->dev = cfhsi->ndev;
607
608                 /*
609                  * We're called in callback from HSI
610                  * and don't know the context we're running in.
611                  */
612                 if (in_interrupt())
613                         netif_rx(skb);
614                 else
615                         netif_rx_ni(skb);
616
617                 /* Update network statistics. */
618                 cfhsi->ndev->stats.rx_packets++;
619                 cfhsi->ndev->stats.rx_bytes += len;
620
621                 pfrm += *plen;
622                 rx_sz += *plen;
623                 plen++;
624                 nfrms++;
625         }
626
627         return rx_sz;
628 }
629
630 static void cfhsi_rx_done(struct cfhsi *cfhsi)
631 {
632         int res;
633         int desc_pld_len = 0, rx_len, rx_state;
634         struct cfhsi_desc *desc = NULL;
635         u8 *rx_ptr, *rx_buf;
636         struct cfhsi_desc *piggy_desc = NULL;
637
638         desc = (struct cfhsi_desc *)cfhsi->rx_buf;
639
640         netdev_dbg(cfhsi->ndev, "%s\n", __func__);
641
642         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
643                 return;
644
645         /* Update inactivity timer if pending. */
646         spin_lock_bh(&cfhsi->lock);
647         mod_timer_pending(&cfhsi->inactivity_timer,
648                         jiffies + cfhsi->inactivity_timeout);
649         spin_unlock_bh(&cfhsi->lock);
650
651         if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
652                 desc_pld_len = cfhsi_rx_desc_len(desc);
653
654                 if (desc_pld_len < 0)
655                         goto out_of_sync;
656
657                 rx_buf = cfhsi->rx_buf;
658                 rx_len = desc_pld_len;
659                 if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
660                         rx_len += CFHSI_DESC_SZ;
661                 if (desc_pld_len == 0)
662                         rx_buf = cfhsi->rx_flip_buf;
663         } else {
664                 rx_buf = cfhsi->rx_flip_buf;
665
666                 rx_len = CFHSI_DESC_SZ;
667                 if (cfhsi->rx_state.pld_len > 0 &&
668                                 (desc->header & CFHSI_PIGGY_DESC)) {
669
670                         piggy_desc = (struct cfhsi_desc *)
671                                 (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
672                                                 cfhsi->rx_state.pld_len);
673
674                         cfhsi->rx_state.piggy_desc = true;
675
676                         /* Extract payload len from piggy-backed descriptor. */
677                         desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
678                         if (desc_pld_len < 0)
679                                 goto out_of_sync;
680
681                         if (desc_pld_len > 0) {
682                                 rx_len = desc_pld_len;
683                                 if (piggy_desc->header & CFHSI_PIGGY_DESC)
684                                         rx_len += CFHSI_DESC_SZ;
685                         }
686
687                         /*
688                          * Copy needed information from the piggy-backed
689                          * descriptor to the descriptor in the start.
690                          */
691                         memcpy(rx_buf, (u8 *)piggy_desc,
692                                         CFHSI_DESC_SHORT_SZ);
693                         /* Mark no embedded frame here */
694                         piggy_desc->offset = 0;
695                 }
696         }
697
698         if (desc_pld_len) {
699                 rx_state = CFHSI_RX_STATE_PAYLOAD;
700                 rx_ptr = rx_buf + CFHSI_DESC_SZ;
701         } else {
702                 rx_state = CFHSI_RX_STATE_DESC;
703                 rx_ptr = rx_buf;
704                 rx_len = CFHSI_DESC_SZ;
705         }
706
707         /* Initiate next read */
708         if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
709                 /* Set up new transfer. */
710                 netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
711                                 __func__);
712
713                 res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
714                                 cfhsi->ops);
715                 if (WARN_ON(res < 0)) {
716                         netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
717                                 __func__, res);
718                         cfhsi->ndev->stats.rx_errors++;
719                         cfhsi->ndev->stats.rx_dropped++;
720                 }
721         }
722
723         if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
724                 /* Extract payload from descriptor */
725                 if (cfhsi_rx_desc(desc, cfhsi) < 0)
726                         goto out_of_sync;
727         } else {
728                 /* Extract payload */
729                 if (cfhsi_rx_pld(desc, cfhsi) < 0)
730                         goto out_of_sync;
731                 if (piggy_desc) {
732                         /* Extract any payload in piggyback descriptor. */
733                         if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
734                                 goto out_of_sync;
735                 }
736         }
737
738         /* Update state info */
739         memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
740         cfhsi->rx_state.state = rx_state;
741         cfhsi->rx_ptr = rx_ptr;
742         cfhsi->rx_len = rx_len;
743         cfhsi->rx_state.pld_len = desc_pld_len;
744         cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
745
746         if (rx_buf != cfhsi->rx_buf)
747                 swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
748         return;
749
750 out_of_sync:
751         netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
752         print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
753                         cfhsi->rx_buf, CFHSI_DESC_SZ);
754         schedule_work(&cfhsi->out_of_sync_work);
755 }
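/*
 * Editorial note on the double buffering above: while the data that just
 * arrived in rx_buf is being parsed, the next read is issued into the other
 * (flip) buffer, and the two pointers are swapped at the end of the cycle,
 * so the HSI controller never writes into memory that is still being
 * processed.
 */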
756
757 static void cfhsi_rx_slowpath(unsigned long arg)
758 {
759         struct cfhsi *cfhsi = (struct cfhsi *)arg;
760
761         netdev_dbg(cfhsi->ndev, "%s.\n",
762                 __func__);
763
764         cfhsi_rx_done(cfhsi);
765 }
766
767 static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
768 {
769         struct cfhsi *cfhsi;
770
771         cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
772         netdev_dbg(cfhsi->ndev, "%s.\n",
773                 __func__);
774
775         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
776                 return;
777
778         if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
779                 wake_up_interruptible(&cfhsi->flush_fifo_wait);
780         else
781                 cfhsi_rx_done(cfhsi);
782 }
783
784 static void cfhsi_wake_up(struct work_struct *work)
785 {
786         struct cfhsi *cfhsi = NULL;
787         int res;
788         int len;
789         long ret;
790
791         cfhsi = container_of(work, struct cfhsi, wake_up_work);
792
793         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
794                 return;
795
796         if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
797                 /* This happens when wakeup is requested by
798                  * both ends at the same time. */
799                 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
800                 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
801                 return;
802         }
803
804         /* Activate wake line. */
805         cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
806
807         netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
808                 __func__);
809
810         /* Wait for acknowledge. */
811         ret = CFHSI_WAKE_TOUT;
812         ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
813                                         test_and_clear_bit(CFHSI_WAKE_UP_ACK,
814                                                         &cfhsi->bits), ret);
815         if (unlikely(ret < 0)) {
816                 /* Interrupted by signal. */
817                 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
818                         __func__, ret);
819
820                 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
821                 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
822                 return;
823         } else if (!ret) {
824                 bool ca_wake = false;
825                 size_t fifo_occupancy = 0;
826
827                 /* Wakeup timeout */
828                 netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
829                         __func__);
830
831                 /* Check the FIFO to see if the modem has sent something. */
832                 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
833                                         &fifo_occupancy));
834
835                 netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
836                                 __func__, (unsigned) fifo_occupancy);
837
838                 /* Check if we missed the interrupt. */
839                 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
840                                                         &ca_wake));
841
842                 if (ca_wake) {
843                         netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
844                                 __func__);
845
846                         /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
847                         clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
848
849                         /* Continue execution. */
850                         goto wake_ack;
851                 }
852
853                 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
854                 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
855                 return;
856         }
857 wake_ack:
858         netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
859                 __func__);
860
861         /* Mark the interface awake and clear the pending wake-up request. */
862         set_bit(CFHSI_AWAKE, &cfhsi->bits);
863         clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
864
865         /* Resume read operation. */
866         netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
867         res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
868
869         if (WARN_ON(res < 0))
870                 netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
871
872         /* Clear power up acknowledgment. */
873         clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
874
875         spin_lock_bh(&cfhsi->lock);
876
877         /* Resume transmit if queues are not empty. */
878         if (!cfhsi_tx_queue_len(cfhsi)) {
879                 netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
880                         __func__);
881                 /* Start inactivity timer. */
882                 mod_timer(&cfhsi->inactivity_timer,
883                                 jiffies + cfhsi->inactivity_timeout);
884                 spin_unlock_bh(&cfhsi->lock);
885                 return;
886         }
887
888         netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
889                 __func__);
890
891         spin_unlock_bh(&cfhsi->lock);
892
893         /* Create HSI frame. */
894         len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
895
896         if (likely(len > 0)) {
897                 /* Set up new transfer. */
898                 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
899                 if (WARN_ON(res < 0)) {
900                         netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
901                                 __func__, res);
902                         cfhsi_abort_tx(cfhsi);
903                 }
904         } else {
905                 netdev_err(cfhsi->ndev,
906                                 "%s: Failed to create HSI frame: %d.\n",
907                                 __func__, len);
908         }
909 }
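/*
 * Editorial summary of the wake-up handshake implemented above:
 *
 *   host                               modem
 *   cfhsi_wake_up(ops)  ------>        (wake line asserted)
 *                       <------ ack    (cfhsi_wake_up_cb() sets
 *                                       CFHSI_WAKE_UP_ACK)
 *
 * On acknowledge (or a detected missed interrupt), CFHSI_AWAKE is set, RX
 * is restarted, and TX resumes; if the queues are empty the inactivity
 * timer is armed instead.
 */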
910
911 static void cfhsi_wake_down(struct work_struct *work)
912 {
913         long ret;
914         struct cfhsi *cfhsi = NULL;
915         size_t fifo_occupancy = 0;
916         int retry = CFHSI_WAKE_TOUT;
917
918         cfhsi = container_of(work, struct cfhsi, wake_down_work);
919         netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
920
921         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
922                 return;
923
924         /* Deactivate wake line. */
925         cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
926
927         /* Wait for acknowledge. */
928         ret = CFHSI_WAKE_TOUT;
929         ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
930                                         test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
931                                                         &cfhsi->bits), ret);
932         if (ret < 0) {
933                 /* Interrupted by signal. */
934                 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
935                         __func__, ret);
936                 return;
937         } else if (!ret) {
938                 bool ca_wake = true;
939
940                 /* Timeout */
941                 netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
942
943                 /* Check if we missed the interrupt. */
944                 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
945                                                         &ca_wake));
946                 if (!ca_wake)
947                         netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
948                                 __func__);
949         }
950
951         /* Check FIFO occupancy. */
952         while (retry) {
953                 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
954                                                         &fifo_occupancy));
955
956                 if (!fifo_occupancy)
957                         break;
958
959                 set_current_state(TASK_INTERRUPTIBLE);
960                 schedule_timeout(1);
961                 retry--;
962         }
963
964         if (!retry)
965                 netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
966
967         /* Clear AWAKE condition. */
968         clear_bit(CFHSI_AWAKE, &cfhsi->bits);
969
970         /* Cancel pending RX requests. */
971         cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
972 }
973
974 static void cfhsi_out_of_sync(struct work_struct *work)
975 {
976         struct cfhsi *cfhsi = NULL;
977
978         cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
979
980         rtnl_lock();
981         dev_close(cfhsi->ndev);
982         rtnl_unlock();
983 }
984
985 static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
986 {
987         struct cfhsi *cfhsi = NULL;
988
989         cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
990         netdev_dbg(cfhsi->ndev, "%s.\n",
991                 __func__);
992
993         set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
994         wake_up_interruptible(&cfhsi->wake_up_wait);
995
996         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
997                 return;
998
999         /* Schedule wake up work queue if the peer initiates. */
1000         if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
1001                 queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1002 }
1003
1004 static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
1005 {
1006         struct cfhsi *cfhsi = NULL;
1007
1008         cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
1009         netdev_dbg(cfhsi->ndev, "%s.\n",
1010                 __func__);
1011
1012         /* Initiating low power is only permitted by the host (us). */
1013         set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1014         wake_up_interruptible(&cfhsi->wake_down_wait);
1015 }
1016
1017 static void cfhsi_aggregation_tout(unsigned long arg)
1018 {
1019         struct cfhsi *cfhsi = (struct cfhsi *)arg;
1020
1021         netdev_dbg(cfhsi->ndev, "%s.\n",
1022                 __func__);
1023
1024         cfhsi_start_tx(cfhsi);
1025 }
1026
1027 static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1028 {
1029         struct cfhsi *cfhsi = NULL;
1030         int start_xfer = 0;
1031         int timer_active;
1032         int prio;
1033
1034         if (!dev)
1035                 return -EINVAL;
1036
1037         cfhsi = netdev_priv(dev);
1038
1039         switch (skb->priority) {
1040         case TC_PRIO_BESTEFFORT:
1041         case TC_PRIO_FILLER:
1042         case TC_PRIO_BULK:
1043                 prio = CFHSI_PRIO_BEBK;
1044                 break;
1045         case TC_PRIO_INTERACTIVE_BULK:
1046                 prio = CFHSI_PRIO_VI;
1047                 break;
1048         case TC_PRIO_INTERACTIVE:
1049                 prio = CFHSI_PRIO_VO;
1050                 break;
1051         case TC_PRIO_CONTROL:
1052         default:
1053                 prio = CFHSI_PRIO_CTL;
1054                 break;
1055         }
1056
1057         spin_lock_bh(&cfhsi->lock);
1058
1059         /* Update aggregation statistics  */
1060         cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1061
1062         /* Queue the SKB */
1063         skb_queue_tail(&cfhsi->qhead[prio], skb);
1064
1065         /* Sanity check; xmit should not be called after unregister_netdev */
1066         if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
1067                 spin_unlock_bh(&cfhsi->lock);
1068                 cfhsi_abort_tx(cfhsi);
1069                 return -EINVAL;
1070         }
1071
1072         /* Send flow off if number of packets is above high water mark. */
1073         if (!cfhsi->flow_off_sent &&
1074                 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
1075                 cfhsi->cfdev.flowctrl) {
1076                 cfhsi->flow_off_sent = 1;
1077                 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
1078         }
1079
1080         if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
1081                 cfhsi->tx_state = CFHSI_TX_STATE_XFER;
1082                 start_xfer = 1;
1083         }
1084
1085         if (!start_xfer) {
1086                 /* Send aggregate if it is possible */
1087                 bool aggregate_ready =
1088                         cfhsi_can_send_aggregate(cfhsi) &&
1089                         del_timer(&cfhsi->aggregation_timer) > 0;
1090                 spin_unlock_bh(&cfhsi->lock);
1091                 if (aggregate_ready)
1092                         cfhsi_start_tx(cfhsi);
1093                 return 0;
1094         }
1095
1096         /* Delete inactivity timer if started. */
1097         timer_active = del_timer_sync(&cfhsi->inactivity_timer);
1098
1099         spin_unlock_bh(&cfhsi->lock);
1100
1101         if (timer_active) {
1102                 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
1103                 int len;
1104                 int res;
1105
1106                 /* Create HSI frame. */
1107                 len = cfhsi_tx_frm(desc, cfhsi);
1108                 WARN_ON(!len);
1109
1110                 /* Set up new transfer. */
1111                 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
1112                 if (WARN_ON(res < 0)) {
1113                         netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
1114                                 __func__, res);
1115                         cfhsi_abort_tx(cfhsi);
1116                 }
1117         } else {
1118                 /* Schedule the wake-up work queue if we initiate. */
1119                 if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
1120                         queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1121         }
1122
1123         return 0;
1124 }
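/*
 * Editorial example of the priority mapping in cfhsi_xmit(): a socket that
 * sets SO_PRIORITY to TC_PRIO_INTERACTIVE (6) lands in the CFHSI_PRIO_VO
 * queue, TC_PRIO_INTERACTIVE_BULK (4) in CFHSI_PRIO_VI, and best-effort /
 * bulk traffic in CFHSI_PRIO_BEBK; cfhsi_dequeue() then drains the queues
 * in strict priority order.
 */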
1125
1126 static const struct net_device_ops cfhsi_netdevops;
1127
1128 static void cfhsi_setup(struct net_device *dev)
1129 {
1130         int i;
1131         struct cfhsi *cfhsi = netdev_priv(dev);
1132         dev->features = 0;
1133         dev->type = ARPHRD_CAIF;
1134         dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1135         dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1136         dev->tx_queue_len = 0;
1137         dev->destructor = free_netdev;
1138         dev->netdev_ops = &cfhsi_netdevops;
1139         for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1140                 skb_queue_head_init(&cfhsi->qhead[i]);
1141         cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1142         cfhsi->cfdev.use_frag = false;
1143         cfhsi->cfdev.use_stx = false;
1144         cfhsi->cfdev.use_fcs = false;
1145         cfhsi->ndev = dev;
1146 }
1147
1148 static int cfhsi_open(struct net_device *ndev)
1149 {
1150         struct cfhsi *cfhsi = netdev_priv(ndev);
1151         int res;
1152
1153         clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1154
1155         /* Initialize state variables. */
1156         cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
1157         cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
1158
1159         /* Set flow info */
1160         cfhsi->flow_off_sent = 0;
1161         cfhsi->q_low_mark = LOW_WATER_MARK;
1162         cfhsi->q_high_mark = HIGH_WATER_MARK;
1163
1164
1165         /*
1166          * Allocate a TX buffer with the size of an HSI packet descriptor
1167          * and the necessary room for CAIF payload frames.
1168          */
1169         cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
1170         if (!cfhsi->tx_buf) {
1171                 res = -ENOMEM;
1172                 goto err_alloc_tx;
1173         }
1174
1175         /*
1176          * Allocate a RX buffer with the size of two HSI packet descriptors and
1177          * the necessary room for CAIF payload frames.
1178          */
1179         cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1180         if (!cfhsi->rx_buf) {
1181                 res = -ENOMEM;
1182                 goto err_alloc_rx;
1183         }
1184
1185         cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1186         if (!cfhsi->rx_flip_buf) {
1187                 res = -ENOMEM;
1188                 goto err_alloc_rx_flip;
1189         }
1190
1191         /* Pre-calculate inactivity timeout. */
1192         if (inactivity_timeout != -1) {
1193                 cfhsi->inactivity_timeout =
1194                                 inactivity_timeout * HZ / 1000;
1195                 if (!cfhsi->inactivity_timeout)
1196                         cfhsi->inactivity_timeout = 1;
1197                 else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1198                         cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1199         } else {
1200                 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1201         }
1202
1203         /* Initialize aggregation timeout */
1204         cfhsi->aggregation_timeout = aggregation_timeout;
1205
1206         /* Initialize receive variables. */
1207         cfhsi->rx_ptr = cfhsi->rx_buf;
1208         cfhsi->rx_len = CFHSI_DESC_SZ;
1209
1210         /* Initialize spin locks. */
1211         spin_lock_init(&cfhsi->lock);
1212
1213         /* Set up the driver. */
1214         cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
1215         cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
1216         cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
1217         cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
1218
1219         /* Initialize the work queues. */
1220         INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
1221         INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
1222         INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
1223
1224         /* Clear all bit fields. */
1225         clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
1226         clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1227         clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
1228         clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1229
1230         /* Create work thread. */
1231         cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
1232         if (!cfhsi->wq) {
1233                 netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
1234                         __func__);
1235                 res = -ENODEV;
1236                 goto err_create_wq;
1237         }
1238
1239         /* Initialize wait queues. */
1240         init_waitqueue_head(&cfhsi->wake_up_wait);
1241         init_waitqueue_head(&cfhsi->wake_down_wait);
1242         init_waitqueue_head(&cfhsi->flush_fifo_wait);
1243
1244         /* Setup the inactivity timer. */
1245         init_timer(&cfhsi->inactivity_timer);
1246         cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
1247         cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
1248         /* Setup the slowpath RX timer. */
1249         init_timer(&cfhsi->rx_slowpath_timer);
1250         cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
1251         cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
1252         /* Setup the aggregation timer. */
1253         init_timer(&cfhsi->aggregation_timer);
1254         cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
1255         cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
1256
1257         /* Activate HSI interface. */
1258         res = cfhsi->ops->cfhsi_up(cfhsi->ops);
1259         if (res) {
1260                 netdev_err(cfhsi->ndev,
1261                         "%s: can't activate HSI interface: %d.\n",
1262                         __func__, res);
1263                 goto err_activate;
1264         }
1265
1266         /* Flush FIFO */
1267         res = cfhsi_flush_fifo(cfhsi);
1268         if (res) {
1269                 netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
1270                         __func__, res);
1271                 goto err_net_reg;
1272         }
1273         return res;
1274
1275  err_net_reg:
1276         cfhsi->ops->cfhsi_down(cfhsi->ops);
1277  err_activate:
1278         destroy_workqueue(cfhsi->wq);
1279  err_create_wq:
1280         kfree(cfhsi->rx_flip_buf);
1281  err_alloc_rx_flip:
1282         kfree(cfhsi->rx_buf);
1283  err_alloc_rx:
1284         kfree(cfhsi->tx_buf);
1285  err_alloc_tx:
1286         return res;
1287 }
1288
1289 static int cfhsi_close(struct net_device *ndev)
1290 {
1291         struct cfhsi *cfhsi = netdev_priv(ndev);
1292         u8 *tx_buf, *rx_buf, *flip_buf;
1293
1294         /* Going to shut down the driver. */
1295         set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1296
1297         /* Flush workqueue */
1298         flush_workqueue(cfhsi->wq);
1299
1300         /* Delete timers if pending */
1301         del_timer_sync(&cfhsi->inactivity_timer);
1302         del_timer_sync(&cfhsi->rx_slowpath_timer);
1303         del_timer_sync(&cfhsi->aggregation_timer);
1304
1305         /* Cancel pending RX request (if any) */
1306         cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
1307
1308         /* Destroy workqueue */
1309         destroy_workqueue(cfhsi->wq);
1310
1311         /* Store buffers; they will be freed later. */
1312         tx_buf = cfhsi->tx_buf;
1313         rx_buf = cfhsi->rx_buf;
1314         flip_buf = cfhsi->rx_flip_buf;
1315         /* Flush transmit queues. */
1316         cfhsi_abort_tx(cfhsi);
1317
1318         /* Deactivate interface */
1319         cfhsi->ops->cfhsi_down(cfhsi->ops);
1320
1321         /* Free buffers. */
1322         kfree(tx_buf);
1323         kfree(rx_buf);
1324         kfree(flip_buf);
1325         return 0;
1326 }
1327
1328 static void cfhsi_uninit(struct net_device *dev)
1329 {
1330         struct cfhsi *cfhsi = netdev_priv(dev);
1331         ASSERT_RTNL();
1332         symbol_put(cfhsi_get_ops); /* must match symbol_get(cfhsi_get_ops) in newlink */
1333         list_del(&cfhsi->list);
1334 }
1335
1336 static const struct net_device_ops cfhsi_netdevops = {
1337         .ndo_uninit = cfhsi_uninit,
1338         .ndo_open = cfhsi_open,
1339         .ndo_stop = cfhsi_close,
1340         .ndo_start_xmit = cfhsi_xmit
1341 };
1342
1343 static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
1344 {
1345         int i;
1346
1347         if (!data) {
1348                 pr_debug("no params data found\n");
1349                 return;
1350         }
1351
1352         i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
1353         if (data[i])
1354                 inactivity_timeout = nla_get_u32(data[i]);
1355
1356         i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
1357         if (data[i])
1358                 aggregation_timeout = nla_get_u32(data[i]);
1359
1360         i = __IFLA_CAIF_HSI_HEAD_ALIGN;
1361         if (data[i])
1362                 hsi_head_align = nla_get_u32(data[i]);
1363
1364         i = __IFLA_CAIF_HSI_TAIL_ALIGN;
1365         if (data[i])
1366                 hsi_tail_align = nla_get_u32(data[i]);
1367
1368         i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
1369         if (data[i])
1370                 hsi_high_threshold = nla_get_u32(data[i]);
1371 }
1372
1373 static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
1374                                 struct nlattr *data[])
1375 {
1376         cfhsi_netlink_parms(data, netdev_priv(dev));
1377         netdev_state_change(dev);
1378         return 0;
1379 }
1380
1381 static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
1382         [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
1383         [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
1384         [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
1385         [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
1386         [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
1387         [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
1388 };
1389
1390 static size_t caif_hsi_get_size(const struct net_device *dev)
1391 {
1392         int i;
1393         size_t s = 0;
1394         for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
1395                 s += nla_total_size(caif_hsi_policy[i].len);
1396         return s;
1397 }
1398
1399 static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
1400 {
1401         if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
1402                         inactivity_timeout) ||
1403             nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
1404                         aggregation_timeout) ||
1405             nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, hsi_head_align) ||
1406             nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, hsi_tail_align) ||
1407             nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
1408                         hsi_high_threshold) ||
1409             nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
1410                         hsi_low_threshold))
1411                 return -EMSGSIZE;
1412
1413         return 0;
1414 }
1415
1416 static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1417                           struct nlattr *tb[], struct nlattr *data[])
1418 {
1419         struct cfhsi *cfhsi = NULL;
1420         struct cfhsi_ops *(*get_ops)(void);
1421
1422         ASSERT_RTNL();
1423
1424         cfhsi = netdev_priv(dev);
1425         cfhsi_netlink_parms(data, cfhsi);
1426         dev_net_set(cfhsi->ndev, src_net);
1427
1428         get_ops = symbol_get(cfhsi_get_ops);
1429         if (!get_ops) {
1430                 pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1431                 return -ENODEV;
1432         }
1433
1434         /* Assign the HSI device. */
1435         cfhsi->ops = (*get_ops)();
1436         if (!cfhsi->ops) {
1437                 pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1438                 goto err;
1439         }
1440
1441         /* Assign the driver to this HSI device. */
1442         cfhsi->ops->cb_ops = &cfhsi->cb_ops;
1443         if (register_netdevice(dev)) {
1444                 pr_warn("%s: caif_hsi device registration failed\n", __func__);
1445                 goto err;
1446         }
1447         /* Add CAIF HSI device to list. */
1448         list_add_tail(&cfhsi->list, &cfhsi_list);
1449
1450         return 0;
1451 err:
1452         symbol_put(cfhsi_get_ops);
1453         return -ENODEV;
1454 }
1455
1456 static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
1457         .kind           = "cfhsi",
1458         .priv_size      = sizeof(struct cfhsi),
1459         .setup          = cfhsi_setup,
1460         .maxtype        = __IFLA_CAIF_HSI_MAX,
1461         .policy = caif_hsi_policy,
1462         .newlink        = caif_hsi_newlink,
1463         .changelink     = caif_hsi_changelink,
1464         .get_size       = caif_hsi_get_size,
1465         .fill_info      = caif_hsi_fill_info,
1466 };
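/*
 * Editorial usage sketch (assuming an iproute2 build with generic
 * rtnl_link support; the exact spelling is not defined in this file):
 * the link type registered above is "cfhsi", so an interface can be
 * created with something like
 *
 *   ip link add name cfhsi0 type cfhsi
 */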
1467
1468 static void __exit cfhsi_exit_module(void)
1469 {
1470         struct list_head *list_node;
1471         struct list_head *n;
1472         struct cfhsi *cfhsi;
1473
1474         rtnl_link_unregister(&caif_hsi_link_ops);
1475
1476         rtnl_lock();
1477         list_for_each_safe(list_node, n, &cfhsi_list) {
1478                 cfhsi = list_entry(list_node, struct cfhsi, list);
1479                 unregister_netdev(cfhsi->ndev);
1480         }
1481         rtnl_unlock();
1482 }
1483
1484 static int __init cfhsi_init_module(void)
1485 {
1486         return rtnl_link_register(&caif_hsi_link_ops);
1487 }
1488
1489 module_init(cfhsi_init_module);
1490 module_exit(cfhsi_exit_module);