1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/timer.h>
10 #include <linux/sched.h>
11 #include <linux/netdevice.h>
12 #include <linux/errno.h>
13 #include <linux/ieee80211.h>
15 #include "ozprotocol.h"
22 #include <asm/unaligned.h>
23 #include <linux/uaccess.h>
24 #include <net/psnap.h>
25 /*------------------------------------------------------------------------------
27 #define OZ_CF_CONN_SUCCESS 1
28 #define OZ_CF_CONN_FAILURE 2
33 #define OZ_MAX_TIMER_POOL_SIZE 16
35 /*------------------------------------------------------------------------------
38 struct packet_type ptype;
39 char name[OZ_MAX_BINDING_LEN];
40 struct list_head link;
43 /*------------------------------------------------------------------------------
44 * Static external variables.
/* File-scope protocol state.
 * NOTE(review): this extract is missing lines (embedded numbering skips);
 * comments describe only what is visible in this file.
 */
46 static DEFINE_SPINLOCK(g_polling_lock); /* guards g_pd_list and per-PD timer/keepalive state */
47 static LIST_HEAD(g_pd_list); /* all known peer devices (struct oz_pd, linked via ->link) */
48 static LIST_HEAD(g_binding); /* active netdev bindings (struct oz_binding) */
49 static DEFINE_SPINLOCK(g_binding_lock); /* guards g_binding */
50 static struct sk_buff_head g_rx_queue; /* frames deferred while another context is in RX */
51 static u8 g_session_id; /* last session id issued; oz_get_new_session_id() skips 0 */
52 static u16 g_apps = 0x1; /* bitmask of enabled apps; bit 0 appears reserved (see oz_connect_req) */
53 static int g_processing_rx; /* non-zero while oz_pkt_recv is draining frames */
54 /*------------------------------------------------------------------------------
55 * Context: softirq-serialized
/* Return a fresh non-zero session id that differs from 'exclude'.
 * Increments g_session_id, skipping 0 (and re-skipping 0 after avoiding
 * 'exclude').  No locking: relies on softirq serialization (see context
 * note above).  NOTE(review): body truncated in this extract.
 */
57 static u8 oz_get_new_session_id(u8 exclude)
59 if (++g_session_id == 0)
61 if (g_session_id == exclude) {
62 if (++g_session_id == 0)
67 /*------------------------------------------------------------------------------
68 * Context: softirq-serialized
/* Build and transmit an OZ_ELT_CONNECT_RSP frame to peer 'pd' carrying
 * 'status' plus the PD's mode, session id and accepted app bitmask.
 * Allocates the skb with GFP_ATOMIC (softirq context).
 * NOTE(review): body truncated in this extract (error paths and the final
 * transmit call are not visible).
 */
70 static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
73 struct net_device *dev = pd->net_dev;
74 struct oz_hdr *oz_hdr;
76 struct oz_elt_connect_rsp *body;
/* Frame layout: oz_hdr | oz_elt | oz_elt_connect_rsp, contiguous */
78 int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
79 sizeof(struct oz_elt_connect_rsp);
80 skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
83 skb_reserve(skb, LL_RESERVED_SPACE(dev));
84 skb_reset_network_header(skb);
85 oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
86 elt = (struct oz_elt *)(oz_hdr+1);
87 body = (struct oz_elt_connect_rsp *)(elt+1);
89 skb->protocol = htons(OZ_ETHERTYPE);
90 /* Fill in device header */
91 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
92 dev->dev_addr, skb->len) < 0) {
96 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
97 oz_hdr->last_pkt_num = 0;
98 put_unaligned(0, &oz_hdr->pkt_num); /* pkt_num may be unaligned in the frame */
99 elt->type = OZ_ELT_CONNECT_RSP;
100 elt->length = sizeof(struct oz_elt_connect_rsp);
101 memset(body, 0, sizeof(struct oz_elt_connect_rsp));
102 body->status = status;
104 body->mode = pd->mode;
105 body->session_id = pd->session_id;
106 put_unaligned(cpu_to_le16(pd->total_apps), &body->apps); /* wire format is little-endian */
108 oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d", status);
112 /*------------------------------------------------------------------------------
113 * Context: softirq-serialized
/* Decode the peer's encoded keepalive byte into pd->keep_alive in
 * milliseconds.  The low bits (OZ_KALIVE_VALUE_MASK) are a count; the
 * high bits select the unit (seconds/minutes/hours/special).
 * NOTE(review): several case labels and breaks are missing from this
 * extract; the unit mapping below is inferred from the visible arms.
 */
115 static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
117 unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
119 switch (kalive & OZ_KALIVE_TYPE_MASK) {
120 case OZ_KALIVE_SPECIAL:
121 pd->keep_alive = keep_alive * 1000*60*60*24*20; /* units of 20 days, in ms */
124 pd->keep_alive = keep_alive*1000; /* seconds -> ms */
127 pd->keep_alive = keep_alive*1000*60; /* minutes -> ms */
129 case OZ_KALIVE_HOURS:
130 pd->keep_alive = keep_alive*1000*60*60; /* hours -> ms */
135 oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
137 /*------------------------------------------------------------------------------
138 * Context: softirq-serialized
/* Set pd->presleep (ms) from the peer's value (units of 100 ms), falling
 * back to OZ_PRESLEEP_TOUT when the peer sent 0.  When start_timer is set
 * the timeout timer is (re)armed; g_polling_lock is dropped around
 * oz_timer_add because the caller holds it (see OZ_ELT_UPDATE_PARAM_REQ
 * handling in oz_rx_frame).  NOTE(review): body truncated in this extract.
 */
140 static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
143 pd->presleep = presleep*100;
145 pd->presleep = OZ_PRESLEEP_TOUT;
147 spin_unlock(&g_polling_lock); /* caller holds it; oz_timer_add takes it itself */
148 oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
149 spin_lock(&g_polling_lock);
151 oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
153 /*------------------------------------------------------------------------------
154 * Context: softirq-serialized
/* Handle an OZ_ELT_CONNECT_REQ element: create or reuse a PD for
 * 'pd_addr', negotiate the app set and session id, start/stop/resume
 * services accordingly, and send a connect response.  Returns the PD in
 * use (cur_pd when reusing).  NOTE(review): this extract is missing many
 * lines (error paths, duplicate-PD resolution, list handling); comments
 * describe only the visible flow.
 */
156 static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
157 const u8 *pd_addr, struct net_device *net_dev)
160 struct oz_elt_connect_req *body =
161 (struct oz_elt_connect_req *)(elt+1);
162 u8 rsp_status = OZ_STATUS_SUCCESS;
164 u16 new_apps = g_apps; /* start from locally enabled apps, then intersect with peer's */
165 struct net_device *old_net_dev = NULL;
166 struct oz_pd *free_pd = NULL;
170 spin_lock_bh(&g_polling_lock);
172 struct oz_pd *pd2 = NULL;
174 pd = oz_pd_alloc(pd_addr);
177 getnstimeofday(&pd->last_rx_timestamp);
178 spin_lock_bh(&g_polling_lock);
/* Look for an existing PD with the same MAC before inserting the new one */
179 list_for_each(e, &g_pd_list) {
180 pd2 = container_of(e, struct oz_pd, link);
181 if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
188 list_add_tail(&pd->link, &g_pd_list);
191 spin_unlock_bh(&g_polling_lock);
/* If the PD moved to a different netdev, remember the old one to release later */
194 if (pd->net_dev != net_dev) {
195 old_net_dev = pd->net_dev;
197 pd->net_dev = net_dev;
199 oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
200 pd->max_tx_size = OZ_MAX_TX_SIZE;
201 pd->mode = body->mode;
202 pd->pd_info = body->pd_info;
203 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
204 pd->ms_per_isoc = body->ms_per_isoc;
205 if (!pd->ms_per_isoc)
/* Latency field: low bits select 1 ms or 10 ms units for the value */
208 switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
209 case OZ_ONE_MS_LATENCY:
210 pd->isoc_latency = (body->ms_isoc_latency &
211 ~OZ_LATENCY_MASK) / pd->ms_per_isoc;
213 case OZ_TEN_MS_LATENCY:
214 pd->isoc_latency = ((body->ms_isoc_latency &
215 ~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
218 pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
221 if (body->max_len_div16)
222 pd->max_tx_size = ((u16)body->max_len_div16)<<4; /* peer sends max frame len / 16 */
223 oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
224 pd->max_tx_size, pd->ms_per_isoc);
225 pd->max_stream_buffering = 3*1024;
226 pd->pulse_period = OZ_QUANTUM;
227 pd_set_presleep(pd, body->presleep, 0);
228 pd_set_keepalive(pd, body->keep_alive);
/* Accept only apps both sides support (peer's apps field is LE on the wire) */
230 new_apps &= le16_to_cpu(get_unaligned(&body->apps));
231 if ((new_apps & 0x1) && (body->session_id)) {
232 if (pd->session_id) {
233 if (pd->session_id != body->session_id) {
234 rsp_status = OZ_STATUS_SESSION_MISMATCH;
238 new_apps &= ~0x1; /* Resume not permitted */
240 oz_get_new_session_id(body->session_id);
243 if (pd->session_id && !body->session_id) {
244 rsp_status = OZ_STATUS_SESSION_TEARDOWN;
247 new_apps &= ~0x1; /* Resume not permitted */
249 oz_get_new_session_id(body->session_id);
253 if (rsp_status == OZ_STATUS_SUCCESS) {
/* Diff desired vs current app sets (bit 0 is the resume flag, excluded) */
254 u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
255 u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
256 u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
257 spin_unlock_bh(&g_polling_lock);
258 oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
259 oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
260 new_apps, pd->total_apps, pd->paused_apps);
262 if (oz_services_start(pd, start_apps, 0))
263 rsp_status = OZ_STATUS_TOO_MANY_PDS;
266 if (oz_services_start(pd, resume_apps, 1))
267 rsp_status = OZ_STATUS_TOO_MANY_PDS;
269 oz_services_stop(pd, stop_apps, 0);
270 oz_pd_request_heartbeat(pd);
272 spin_unlock_bh(&g_polling_lock);
274 oz_send_conn_rsp(pd, rsp_status);
275 if (rsp_status != OZ_STATUS_SUCCESS) {
282 dev_put(old_net_dev); /* release reference on the netdev we replaced */
284 oz_pd_destroy(free_pd);
287 /*------------------------------------------------------------------------------
288 * Context: softirq-serialized
/* Record a farewell report of 'len' bytes for (ep_num, index) on 'pd'.
 * Allocates a variable-sized oz_farewell (GFP_ATOMIC) and appends it to
 * pd->farewell_list; an existing entry with the same ep/index is looked
 * up first (replacement handling is in lines missing from this extract).
 */
290 static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
291 const u8 *report, u8 len)
293 struct oz_farewell *f;
294 struct oz_farewell *f2;
297 f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC); /* report bytes tail the struct */
303 memcpy(f->report, report, len);
304 oz_dbg(ON, "RX: Adding farewell report\n");
305 spin_lock(&g_polling_lock);
306 list_for_each_entry(f2, &pd->farewell_list, link) {
307 if ((f2->ep_num == ep_num) && (f2->index == index)) {
313 list_add_tail(&f->link, &pd->farewell_list);
314 spin_unlock(&g_polling_lock);
318 /*------------------------------------------------------------------------------
319 * Context: softirq-serialized
/* Main RX path: validate an incoming frame, update PD liveness state,
 * retire/trigger TX on TRIGGERED-mode acks, then walk the element list
 * and dispatch each element by type.
 * Fixes in this revision:
 *  - "getnstimeofday(¤t_time);" was a mis-encoding of
 *    "getnstimeofday(&current_time);" (the "&curren" prefix of
 *    "&current_time" was swallowed as the HTML entity "&curren;").
 *  - removed a stray space before ';' on the src_addr line.
 * NOTE(review): this extract is missing lines (declarations such as
 * 'length'/'dup', early-exit branches, break statements); no attempt is
 * made to reconstruct them here.
 */
321 static void oz_rx_frame(struct sk_buff *skb)
327 struct oz_pd *pd = NULL;
328 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
329 struct timespec current_time;
333 oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
334 oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
335 mac_hdr = skb_mac_header(skb);
336 src_addr = &mac_hdr[ETH_ALEN]; /* source MAC follows the destination MAC */
339 /* Check the version field */
340 if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
341 oz_dbg(ON, "Incorrect protocol version: %d\n",
342 oz_get_prot_ver(oz_hdr->control));
346 pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
348 pd = oz_pd_find(src_addr);
350 if (!(pd->state & OZ_PD_S_CONNECTED))
351 oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
352 getnstimeofday(&current_time);
/* Rearm the presleep timeout at most once per second (or always when
 * presleep is sub-second) to avoid constant timer churn */
353 if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
354 (pd->presleep < MSEC_PER_SEC)) {
355 oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
356 pd->last_rx_timestamp = current_time;
358 if (pkt_num != pd->last_rx_pkt_num) {
359 pd->last_rx_pkt_num = pkt_num;
362 oz_dbg(ON, "Duplicate frame\n");
366 if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
367 oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
368 pd->last_sent_frame = &pd->tx_queue;
369 if (oz_hdr->control & OZ_F_ACK) {
370 /* Retire completed frames */
371 oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
373 if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
374 (pd->state == OZ_PD_S_CONNECTED)) {
375 int backlog = pd->nb_queued_frames;
376 pd->trigger_pkt_num = pkt_num;
377 /* Send queued frames */
378 oz_send_queued_frames(pd, backlog);
382 length -= sizeof(struct oz_hdr);
383 elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
/* Walk the element list; each element is oz_elt header + elt->length bytes */
385 while (length >= sizeof(struct oz_elt)) {
386 length -= sizeof(struct oz_elt) + elt->length;
390 case OZ_ELT_CONNECT_REQ:
391 oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
392 pd = oz_connect_req(pd, elt, src_addr, skb->dev);
394 case OZ_ELT_DISCONNECT:
395 oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
399 case OZ_ELT_UPDATE_PARAM_REQ: {
400 struct oz_elt_update_param *body =
401 (struct oz_elt_update_param *)(elt + 1);
402 oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
403 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
404 spin_lock(&g_polling_lock);
405 pd_set_keepalive(pd, body->keepalive);
406 pd_set_presleep(pd, body->presleep, 1);
407 spin_unlock(&g_polling_lock);
411 case OZ_ELT_FAREWELL_REQ: {
412 struct oz_elt_farewell *body =
413 (struct oz_elt_farewell *)(elt + 1);
414 oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
415 oz_add_farewell(pd, body->ep_num,
416 body->index, body->report,
417 elt->length + 1 - sizeof(*body));
420 case OZ_ELT_APP_DATA:
421 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
422 struct oz_app_hdr *app_hdr =
423 (struct oz_app_hdr *)(elt+1);
426 oz_handle_app_elt(pd, app_hdr->app_id, elt);
430 oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
432 elt = oz_next_elt(elt);
439 /*------------------------------------------------------------------------------
/* Tear down the protocol layer: unregister every netdev binding (dropping
 * the packet handler and the netdev reference), then stop and release
 * every PD on g_pd_list.  Locks are dropped around dev_remove_pack and
 * the per-PD stop because those may sleep / re-enter.
 * NOTE(review): body truncated in this extract.
 */
442 void oz_protocol_term(void)
444 struct oz_binding *b, *t;
446 /* Walk the list of bindings and remove each one.
448 spin_lock_bh(&g_binding_lock);
449 list_for_each_entry_safe(b, t, &g_binding, link) {
451 spin_unlock_bh(&g_binding_lock);
452 dev_remove_pack(&b->ptype);
454 dev_put(b->ptype.dev); /* balance the dev_get_by_name taken in oz_binding_add */
456 spin_lock_bh(&g_binding_lock);
458 spin_unlock_bh(&g_binding_lock);
459 /* Walk the list of PDs and stop each one. This causes the PD to be
460 * removed from the list so we can just pull each one from the head
463 spin_lock_bh(&g_polling_lock);
464 while (!list_empty(&g_pd_list)) {
466 list_first_entry(&g_pd_list, struct oz_pd, link);
468 spin_unlock_bh(&g_polling_lock);
471 spin_lock_bh(&g_polling_lock);
473 spin_unlock_bh(&g_polling_lock);
474 oz_dbg(ON, "Protocol stopped\n");
476 /*------------------------------------------------------------------------------
/* Tasklet handler: snapshot the PD's active app set under g_polling_lock
 * (only when still connected) and run the heartbeat for those apps.
 * 'data' is the struct oz_pd pointer cast to unsigned long (tasklet ABI).
 * NOTE(review): body truncated in this extract ('apps' declaration and
 * cleanup are not visible).
 */
479 void oz_pd_heartbeat_handler(unsigned long data)
481 struct oz_pd *pd = (struct oz_pd *)data;
484 spin_lock_bh(&g_polling_lock);
485 if (pd->state & OZ_PD_S_CONNECTED)
486 apps = pd->total_apps;
487 spin_unlock_bh(&g_polling_lock);
489 oz_pd_heartbeat(pd, apps);
492 /*------------------------------------------------------------------------------
/* Tasklet handler: read the PD's pending timeout type under the lock and
 * act on it (the dispatch switch is in lines missing from this extract).
 * 'data' is the struct oz_pd pointer cast to unsigned long (tasklet ABI).
 */
495 void oz_pd_timeout_handler(unsigned long data)
498 struct oz_pd *pd = (struct oz_pd *)data;
500 spin_lock_bh(&g_polling_lock);
501 type = pd->timeout_type;
502 spin_unlock_bh(&g_polling_lock);
512 /*------------------------------------------------------------------------------
/* hrtimer callback for the periodic heartbeat: advance the timer by
 * pd->pulse_period (ms, split into sec/nsec for ktime_set), defer the
 * real work to the heartbeat tasklet, and keep the timer running.
 */
515 enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
519 pd = container_of(timer, struct oz_pd, heartbeat);
520 hrtimer_forward_now(timer, ktime_set(pd->pulse_period /
521 MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC));
522 tasklet_schedule(&pd->heartbeat_tasklet);
523 return HRTIMER_RESTART; /* periodic: rearm with the forwarded expiry */
525 /*------------------------------------------------------------------------------
/* hrtimer callback for the one-shot timeout: defer handling to the
 * timeout tasklet and do not rearm (one-shot semantics).
 */
528 enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
532 pd = container_of(timer, struct oz_pd, timeout);
533 tasklet_schedule(&pd->timeout_tasklet);
534 return HRTIMER_NORESTART;
536 /*------------------------------------------------------------------------------
537 * Context: softirq or process
/* Arm a per-PD timer of the given 'type' to fire in 'due_time' ms.
 * Timeout-class timers reprogram pd->timeout (pushing out an already
 * active timer); OZ_TIMER_HEARTBEAT starts pd->heartbeat only if it is
 * not already running.  due_time is split into seconds + nanoseconds for
 * ktime_set.  NOTE(review): the switch statement head and some case
 * labels are missing from this extract.
 */
539 void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
541 spin_lock_bh(&g_polling_lock);
545 if (hrtimer_active(&pd->timeout)) {
/* Already running: move the expiry rather than start a second timer */
546 hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
547 MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
549 hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
551 hrtimer_start(&pd->timeout, ktime_set(due_time /
552 MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
553 NSEC_PER_MSEC), HRTIMER_MODE_REL);
555 pd->timeout_type = type; /* remembered for oz_pd_timeout_handler */
557 case OZ_TIMER_HEARTBEAT:
558 if (!hrtimer_active(&pd->heartbeat))
559 hrtimer_start(&pd->heartbeat, ktime_set(due_time /
560 MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
561 NSEC_PER_MSEC), HRTIMER_MODE_REL);
564 spin_unlock_bh(&g_polling_lock);
566 /*------------------------------------------------------------------------------
567 * Context: softirq or process
/* Convenience wrapper: arm the heartbeat timer using the PD's pulse
 * period, falling back to OZ_QUANTUM when no period is configured.
 */
569 void oz_pd_request_heartbeat(struct oz_pd *pd)
571 oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
572 pd->pulse_period : OZ_QUANTUM);
574 /*------------------------------------------------------------------------------
575 * Context: softirq or process
/* Look up a PD by MAC address on g_pd_list.  On a hit, takes a reference
 * (atomic_inc on ref_count) before returning, so the caller must release
 * it.  Returns NULL when not found (the not-found return is in lines
 * missing from this extract).
 */
577 struct oz_pd *oz_pd_find(const u8 *mac_addr)
582 spin_lock_bh(&g_polling_lock);
583 list_for_each(e, &g_pd_list) {
584 pd = container_of(e, struct oz_pd, link);
585 if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
586 atomic_inc(&pd->ref_count); /* caller owns this reference */
587 spin_unlock_bh(&g_polling_lock);
591 spin_unlock_bh(&g_polling_lock);
594 /*------------------------------------------------------------------------------
/* Set or clear app_id's bit in the global enabled-apps mask g_apps.
 * Out-of-range ids (> OZ_APPID_MAX) are silently ignored.
 */
597 void oz_app_enable(int app_id, int enable)
599 if (app_id <= OZ_APPID_MAX) {
600 spin_lock_bh(&g_polling_lock);
602 g_apps |= (1<<app_id);
604 g_apps &= ~(1<<app_id);
605 spin_unlock_bh(&g_polling_lock);
608 /*------------------------------------------------------------------------------
/* packet_type receive hook.  Serializes frame processing across CPUs:
 * if another context is already processing (g_processing_rx), the skb is
 * queued on g_rx_queue and picked up by that context's drain loop;
 * otherwise this context drains the queue itself.
 * NOTE(review): body truncated in this extract (the g_processing_rx
 * set/clear and the call into oz_rx_frame are not visible).
 */
611 static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
612 struct packet_type *pt, struct net_device *orig_dev)
614 skb = skb_share_check(skb, GFP_ATOMIC); /* get a private copy if shared */
617 spin_lock_bh(&g_rx_queue.lock);
618 if (g_processing_rx) {
619 /* We already hold the lock so use __ variant.
621 __skb_queue_head(&g_rx_queue, skb);
622 spin_unlock_bh(&g_rx_queue.lock);
627 spin_unlock_bh(&g_rx_queue.lock);
629 spin_lock_bh(&g_rx_queue.lock);
630 if (skb_queue_empty(&g_rx_queue)) {
632 spin_unlock_bh(&g_rx_queue.lock);
635 /* We already hold the lock so use __ variant.
637 skb = __skb_dequeue(&g_rx_queue);
642 /*------------------------------------------------------------------------------
/* Create a binding that registers oz_pkt_recv for OZ_ETHERTYPE frames on
 * the named netdev, or on all netdevs when 'net_dev' is NULL/empty.
 * Takes a reference on the named device (dev_get_by_name) that is
 * released in oz_binding_remove/oz_protocol_term.
 * NOTE(review): allocation-failure and lookup-failure paths are in lines
 * missing from this extract.
 */
645 void oz_binding_add(const char *net_dev)
647 struct oz_binding *binding;
649 binding = kmalloc(sizeof(struct oz_binding), GFP_KERNEL);
651 binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
652 binding->ptype.func = oz_pkt_recv;
653 memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN); /* NOTE(review): copies full buffer; assumes caller provides >= OZ_MAX_BINDING_LEN readable bytes */
654 if (net_dev && *net_dev) {
655 oz_dbg(ON, "Adding binding: %s\n", net_dev);
657 dev_get_by_name(&init_net, net_dev);
658 if (binding->ptype.dev == NULL) {
659 oz_dbg(ON, "Netdev %s not found\n", net_dev);
664 oz_dbg(ON, "Binding to all netcards\n");
665 binding->ptype.dev = NULL; /* NULL dev => receive from every interface */
668 dev_add_pack(&binding->ptype);
669 spin_lock_bh(&g_binding_lock);
670 list_add_tail(&binding->link, &g_binding);
671 spin_unlock_bh(&g_binding_lock);
675 /*------------------------------------------------------------------------------
/* Stop every PD bound to 'net_dev': move matching PDs to a private list
 * under the lock, then stop them one by one with the lock dropped (the
 * per-PD stop call is in lines missing from this extract).
 */
678 static void pd_stop_all_for_device(struct net_device *net_dev)
685 spin_lock_bh(&g_polling_lock);
686 list_for_each_entry_safe(pd, n, &g_pd_list, link) {
687 if (pd->net_dev == net_dev) {
688 list_move(&pd->link, &h); /* detach to local list 'h' for lock-free stop */
692 spin_unlock_bh(&g_polling_lock);
693 while (!list_empty(&h)) {
694 pd = list_first_entry(&h, struct oz_pd, link);
699 /*------------------------------------------------------------------------------
/* Find the binding for 'net_dev' by name, unregister its packet handler,
 * drop the netdev reference, stop all PDs that were using that device,
 * and unlink the binding.  NOTE(review): the found/not-found flag and
 * the final kfree are in lines missing from this extract.
 */
702 void oz_binding_remove(const char *net_dev)
704 struct oz_binding *binding;
707 oz_dbg(ON, "Removing binding: %s\n", net_dev);
708 spin_lock_bh(&g_binding_lock);
709 list_for_each_entry(binding, &g_binding, link) {
710 if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
711 oz_dbg(ON, "Binding '%s' found\n", net_dev);
716 spin_unlock_bh(&g_binding_lock);
718 dev_remove_pack(&binding->ptype); /* may sleep: done after dropping the spinlock */
719 if (binding->ptype.dev) {
720 dev_put(binding->ptype.dev); /* balance dev_get_by_name from oz_binding_add */
721 pd_stop_all_for_device(binding->ptype.dev);
723 list_del(&binding->link);
727 /*------------------------------------------------------------------------------
/* Copy the next comma-separated token from 's' into 'dname' (at most
 * max_size-1 chars) and return the advanced source pointer.
 * NOTE(review): only the scan loop head is visible in this extract; the
 * copy, termination and return are in missing lines.
 */
730 static char *oz_get_next_device_name(char *s, char *dname, int max_size)
734 while (*s && (*s != ',') && max_size > 1) {
741 /*------------------------------------------------------------------------------
/* Initialize the protocol layer: set up the deferred-RX queue and create
 * bindings from the comma-separated device list in 'devs'.  A leading
 * "*" means bind to all interfaces.  NOTE(review): the per-device loop
 * and return value are in lines missing from this extract.
 */
744 int oz_protocol_init(char *devs)
746 skb_queue_head_init(&g_rx_queue);
747 if (devs && (devs[0] == '*')) {
748 oz_binding_add(NULL); /* NULL name => bind to every netdev */
752 devs = oz_get_next_device_name(devs, d, sizeof(d));
759 /*------------------------------------------------------------------------------
/* Copy up to 'max_count' PD MAC addresses from g_pd_list into 'addr'.
 * Returns the number copied (the return statement is in lines missing
 * from this extract).
 */
762 int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
768 spin_lock_bh(&g_polling_lock);
769 list_for_each(e, &g_pd_list) {
770 if (count >= max_count)
772 pd = container_of(e, struct oz_pd, link);
773 memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
775 spin_unlock_bh(&g_polling_lock);
778 /*------------------------------------------------------------------------------
/* Exported helper: take g_polling_lock (BH-disabled) for callers outside
 * this file that need to examine PD state consistently.
 */
780 void oz_polling_lock_bh(void)
782 spin_lock_bh(&g_polling_lock);
784 /*------------------------------------------------------------------------------
/* Exported helper: release g_polling_lock taken via oz_polling_lock_bh. */
786 void oz_polling_unlock_bh(void)
788 spin_unlock_bh(&g_polling_lock);