/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>
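
/* Pad a transfer length out to a whole multiple of the mailbox block size */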
#define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))

static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
static void ath6kl_htc_mbox_stop(struct htc_target *target);
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue);
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *cred_info,
				       u16 svc_pri_order[], int len);

/* threshold to re-enable Tx bundling for an AC */
#define TX_RESUME_BUNDLE_THRESHOLD	1500
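
/*
 * ath6kl_htc_tx_from_queue() counts consecutive TX drains on an AC that
 * went out without any bundling; once the count reaches this threshold,
 * Tx bundling is re-enabled for that AC.
 */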

/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
				  struct htc_endpoint_credit_dist *ep_dist,
				  int credits)
{
	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
		   ep_dist->endpoint, credits);

	ep_dist->credits += credits;
	ep_dist->cred_assngd += credits;
	cred_info->cur_free_credits -= credits;
}

static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
			       struct list_head *ep_list,
			       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		if (tot_credits > 4) {
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				ath6kl_credit_deposit(cred_info,
						      cur_ep_dist,
						      cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}
		}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_credit_deposit(cred_info, cur_ep_dist,
					      cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		}

		/*
		 * Streams have to be created (explicit | implicit) for all
		 * kinds of traffic. BE endpoints are also inactive in the
		 * beginning. When BE traffic starts it creates implicit
		 * streams that redistribute credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits. Credits will be distributed
		 * as traffic activity demands.
		 */
	}

	/*
	 * ath6kl_credit_seek() uses list_for_each_entry_reverse() to walk
	 * the whole ep list, so assign lowestpri_ep_dist only after the
	 * walk above is complete.
	 */
	cred_info->lowestpri_ep_dist = cur_ep_dist->list;

	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		else {
			/*
			 * For the remaining data endpoints, we assume that
			 * each cred_per_msg is the same. We use a simple
			 * calculation here: take the remaining credits,
			 * determine how many max messages they can cover,
			 * and then set each endpoint's normal value to 3/4
			 * of this amount.
			 */
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;
		}

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
			   cur_ep_dist->endpoint,
			   cur_ep_dist->svc_id,
			   cur_ep_dist->credits,
			   cur_ep_dist->cred_per_msg,
			   cur_ep_dist->cred_norm,
			   cur_ep_dist->cred_min);
	}
}

/* initialize and setup credit distribution */
static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
					struct ath6kl_htc_credit_info *cred_info)
{
	u16 servicepriority[5];

	memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));

	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
	servicepriority[1] = WMI_DATA_VO_SVC;
	servicepriority[2] = WMI_DATA_VI_SVC;
	servicepriority[3] = WMI_DATA_BE_SVC;
	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

	/* set priority list */
	ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);

	return 0;
}

/* reduce an ep's credits back to a set limit */
static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
				 struct htc_endpoint_credit_dist *ep_dist,
				 int limit)
{
	int credits;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
		   ep_dist->endpoint, limit);

	ep_dist->cred_assngd = limit;

	if (ep_dist->credits <= limit)
		return;

	credits = ep_dist->credits - limit;
	ep_dist->credits -= credits;
	cred_info->cur_free_credits += credits;
}

static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
				 struct list_head *epdist_list)
{
	struct htc_endpoint_credit_dist *cur_list;

	list_for_each_entry(cur_list, epdist_list, list) {
		if (cur_list->endpoint == ENDPOINT_0)
			continue;

		if (cur_list->cred_to_dist > 0) {
			cur_list->credits += cur_list->cred_to_dist;
			cur_list->cred_to_dist = 0;

			if (cur_list->credits > cur_list->cred_assngd)
				ath6kl_credit_reduce(cred_info,
						     cur_list,
						     cur_list->cred_assngd);

			if (cur_list->credits > cur_list->cred_norm)
				ath6kl_credit_reduce(cred_info, cur_list,
						     cur_list->cred_norm);

			if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
				if (cur_list->txq_depth == 0)
					ath6kl_credit_reduce(cred_info,
							     cur_list, 0);
			}
		}
	}
}

/*
 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
 * question.
 */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
			       struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, try taking away from
	 * lower priority services. The rules for taking away credits:
	 *
	 *   1. Only take from lower priority endpoints
	 *   2. Only take what is allocated above the minimum (never
	 *      starve an endpoint completely)
	 *   3. Only take what you need.
	 */

	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		     curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * its minimum and it has enough credits assigned
			 * above its minimum to fulfill our need. Try to
			 * take away just enough to fulfill our need.
			 */
			ath6kl_credit_reduce(cred_info, curdist_list,
					     curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_credit_deposit(cred_info, ep_dist, credits);

	ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
				       struct list_head *ep_dist_list)
{
	struct htc_endpoint_credit_dist *curdist_list;

	list_for_each_entry(curdist_list, ep_dist_list, list) {
		if (curdist_list->endpoint == ENDPOINT_0)
			continue;

		if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
			curdist_list->dist_flags |= HTC_EP_ACTIVE;

		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
			if (curdist_list->txq_depth == 0)
				ath6kl_credit_reduce(info, curdist_list, 0);
			else
				ath6kl_credit_reduce(info,
						     curdist_list,
						     curdist_list->cred_min);
		}
	}
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked, this
 * function shall NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() api.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
				     struct list_head *ep_dist_list,
				     enum htc_credit_dist_reason reason)
{
	switch (reason) {
	case HTC_CREDIT_DIST_SEND_COMPLETE:
		ath6kl_credit_update(cred_info, ep_dist_list);
		break;
	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
		ath6kl_credit_redistribute(cred_info, ep_dist_list);
		break;
	default:
		break;
	}

	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
	WARN_ON(cred_info->cur_free_credits < 0);
}

static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}

static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}
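
/*
 * Post-send bookkeeping for a single packet: restore the caller's view of
 * the buffer and, if the send failed, return the consumed credits to the
 * distribution list so they can be handed out again.
 */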
static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx complete ep %d pkts %d\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target, txq);
}
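
/* completion callback installed on every asynchronously sent data packet */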
static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
		   packet->info.tx.seqno);

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);

	htc_tx_complete(endpoint, &container);
}

static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx scat complete len %d entries %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}
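
/*
 * Push a single HTC frame to the mailbox. Packets without a completion
 * callback are written synchronously; everything else goes through the
 * async HIF write path.
 */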
static int ath6kl_htc_tx_issue(struct htc_target *target,
			       struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
		   send_len, packet->info.tx.seqno, padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
					     target->dev->ar->mbox_info.htc_addr,
					     packet->buf, padded_len,
					     HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
					 target->dev->ar->mbox_info.htc_addr,
					 packet->buf, padded_len,
					 HIF_WR_ASYNC_BLOCK_INC, packet);

	return status;
}
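
/*
 * Reserve the credits needed for a message of 'len' bytes. One credit
 * covers tgt_cred_sz bytes; if the endpoint runs short, seek credits
 * from lower priority endpoints before giving up.
 */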
static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{
	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit not found for ep %d\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit we need credits asap\n");
		}
	}

	return 0;
}
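
/*
 * Move packets from the endpoint TX queue onto the caller's queue for as
 * long as credits can be reserved for them.
 */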
static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
				   struct htc_endpoint *endpoint,
				   struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {
		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx got packet 0x%p queue depth %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit, this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}

static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;
	u8 flags;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
		ath6kl_htc_tx_prep_pkt(packet, flags,
				       cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
			   i, packet, packet->info.tx.seqno, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}

/*
 * Drain a queue and send as bundles; this function may return without fully
 * draining the queue when
 *
 *    1. scatter resources are exhausted
 *    2. a message that will consume a partial credit will stop the
 *       bundling process early
 *    3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;
	u32 txb_mask;
	u8 ac = WMM_NUM_AC;

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
			if (WMM_AC_BE == ac)
				/*
				 * BE, BK have priorities and bit
				 * positions reversed
				 */
				txb_mask = (1 << WMM_AC_BK);
			else
				/*
				 * any AC with priority lower than
				 * itself
				 */
				txb_mask = ((1 << ac) - 1);

			/*
			 * when the scatter request resources drop below a
			 * certain threshold, disable Tx bundling for all
			 * AC's with priority lower than the current requesting
			 * AC. Otherwise re-enable Tx bundling for them
			 */
			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
				target->tx_bndl_mask &= ~txb_mask;
			else
				target->tx_bndl_mask |= txb_mask;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}
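
/*
 * Drain the endpoint TX queue. tx_proc_cnt makes the drain single
 * threaded per endpoint: concurrent callers back off and leave the work
 * to the first one.
 */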
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;
	u8 ac = WMM_NUM_AC;
	int status;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_mask) &&
			    (get_queue_depth(&txq) >=
			    HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				/* check if bundling is enabled for an AC */
				if (target->tx_bndl_mask & (1 << ac)) {
					ath6kl_htc_tx_bundle(endpoint, &txq,
							     &temp1, &temp2);
					bundle_sent += temp1;
					n_pkts_bundle += temp2;
				}
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			status = ath6kl_htc_tx_issue(target, packet);

			if (status) {
				packet->status = status;
				packet->completion(packet->context, packet);
			}
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

		/*
		 * if an AC has bundling disabled and no tx bundling
		 * has occurred continuously for a certain number of TX,
		 * enable tx bundling for this AC
		 */
		if (!bundle_sent) {
			if (!(target->tx_bndl_mask & (1 << ac)) &&
			    (ac < WMM_NUM_AC)) {
				if (++target->ac_tx_count[ac] >=
					TX_RESUME_BUNDLE_THRESHOLD) {
					target->ac_tx_count[ac] = 0;
					target->tx_bndl_mask |= (1 << ac);
				}
			}
		} else {
			/* tx bundling will reset the counter */
			if (ac < WMM_NUM_AC)
				target->ac_tx_count[ac] = 0;
		}
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}
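
/*
 * Queue a single packet on an endpoint. On overflow the endpoint's
 * tx_full callback decides whether the packet is dropped or queued anyway.
 */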
static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx overflow ep %d depth %d max %d\n",
			   endpoint->eid, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
			(struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
				target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;
		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *credit_info,
				       u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->credit_info = credit_info;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}

		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}

static int ath6kl_htc_mbox_tx(struct htc_target *target,
			      struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx ep id %d buf 0x%p len %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}

	endpoint = &target->endpoint[packet->endpoint];

	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}

/* flush endpoint TX queue */
static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
				       enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}
}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}

static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id eid,
					     bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx activity ctxt 0x%p dist 0x%p\n",
			   target->credit_info, &target->cred_dist_list);

		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
					      int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}
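
/* control endpoint frames are capped lower than A-MSDU sized data frames */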
static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}

static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr);

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}

/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					HTC_HDR_LENGTH);

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
			    htc_hdr->eid, htc_hdr->flags,
			    le16_to_cpu(htc_hdr->payld_len));
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {
		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			no_recycle = true;
			spin_unlock_bh(&target->rx_lock);
			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}
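
/*
 * Parse each lookahead, validate it against the expected endpoint, and
 * allocate receive buffers for every message (or bundle of messages) the
 * lookahead announces.
 */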
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {
		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else {
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;
		}

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
				"htc rx unexpected endpoint 0 message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit report ep %d credits %d\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operations note, this is done with the lock held
		 */
		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}

static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
		    next_lk_ahds) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
					"htc rx next look ahead",
					"", next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
					"", record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;
}

static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
				"", orig_buf, orig_len);

	return status;
}

static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}

static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);

	endpoint->ep_cb.rx(endpoint->target, packet);
}
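
/*
 * Fetch several messages from the mailbox in a single scatter request.
 * Only the last packet of a full bundle may have its lookahead honored;
 * all others are flagged to be ignored.
 */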
static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx bundle depth %d pkts %d\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packet 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet
			 * however can have its lookahead used
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}

static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}

static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;
	struct list_head tmp_rxq;
	struct htc_packet *packet, *tmp_pkt;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		INIT_LIST_HEAD(&tmp_rxq);

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = ath6kl_htc_rx_bundle(target, rx_pktq,
						      &tmp_rxq,
						      &fetched_pkts,
						      part_bundle);
			if (status)
				goto fail_rx;

			if (!list_empty(rx_pktq))
				part_bundle = true;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}

		if (!fetched_pkts) {
			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_is_singular(rx_pktq))
				/*
				 * look_aheads in all packets
				 * except the last one in the
				 * bundle must be ignored
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = ath6kl_htc_rx_packet(target, packet,
						      packet->act_len);

			list_move_tail(&packet->list, &tmp_rxq);

			if (status)
				goto fail_rx;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}
	}

	return 0;

fail_rx:

	/*
	 * Cleanup any packets we allocated but didn't use to
	 * actually fetch any packets.
	 */

	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	return status;
}

int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead, int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	INIT_LIST_HEAD(&comp_pktq);
	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing
	 */
	look_aheads[0] = msg_look_ahead;

	while (true) {
		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
		endpoint = &target->endpoint[id];

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets indicated by the
		 * look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected, force IRQ status
			 * re-check again
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kl_hif_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kl_hif_rx_control(target->dev, false);
	}
	*num_pkts = n_fetched;

	return status;
}

/*
 * Synchronously wait for a control message from the target,
 * This function is used at initialization time ONLY. At init messages
 * on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
				       HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}
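
/*
 * Queue caller-supplied receive buffers on an endpoint and, if the
 * receiver was blocked waiting for buffers on that endpoint, unblock it.
 */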
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -EINVAL;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return -EINVAL;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx add multiple ep id %d cnt %d len %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx blocked on ep %d, unblocking\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kl_hif_rx_control(target->dev, true);

	return status;
}

static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use.. */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			/*
			 * packets in rx_bufq of endpoint 0 have originally
			 * been queued from target->free_ctrl_rxbuf where
			 * packet and packet->buf_start are allocated
			 * separately using kmalloc(). For other endpoint
			 * rx_bufq, it is allocated as skb where packet is
			 * skb->head. Take care of this difference while
			 * freeing them.
			 */
			if (packet->endpoint == ENDPOINT_0) {
				kfree(packet->buf_start);
				kfree(packet);
			} else {
				dev_kfree_skb(packet->pkt_cntxt);
			}
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}
static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
					struct htc_service_connect_req *conn_req,
					struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;
	u16 msg_id;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc connect service target 0x%p service id 0x%x\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;
		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
		msg_id = le16_to_cpu(resp_msg->msg_id);

		if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
		    (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}
	if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
			 assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		/* endpoint is already connected to a service */
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_ep = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
	switch (endpoint->svc_id) {
	case WMI_DATA_BK_SVC:
		endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
		break;
	default:
		endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
		break;
	}

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override the cred_per_msg calculation; this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else {
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;
	}

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;
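	/*
	 * Worked example (hypothetical numbers, editor's addition): if
	 * the target reports a credit size of 256 bytes and the service
	 * negotiates a max_msg_sz of 1600, cred_per_msg = 1600 / 256 = 6
	 * by integer division. If the credit size exceeded the message
	 * size the division would yield 0, which the clamp above raises
	 * to 1 so a message always consumes at least one credit.
	 */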
	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;
	status = 0;

fail_tx:
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);
	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}
	return status;
}
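/*
 * Illustrative sketch (editor's addition, not part of the original
 * driver): connecting a WMI data service through the routine above.
 * The helper name, queue depth, and callback wiring are hypothetical;
 * real values come from the WMI layer.
 */
#if 0
static int example_connect_be(struct htc_target *target,
			      struct htc_ep_callbacks *ep_cb)
{
	struct htc_service_connect_req req;
	struct htc_service_connect_resp resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.svc_id = WMI_DATA_BE_SVC;	/* service to connect */
	req.ep_cb = *ep_cb;		/* rx/tx completion callbacks */
	req.max_txq_depth = 32;		/* hypothetical queue depth */

	/* on success, resp.endpoint holds the assigned endpoint id */
	return ath6kl_htc_mbox_conn_service(target, &req, &resp);
}
#endif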
static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0, sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset distribution list */
	/* FIXME: free existing entries */
	INIT_LIST_HEAD(&target->cred_dist_list);
}
static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
					 enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);

	return num;
}
static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc bundling allowed msg_per_bndl_max %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		/* tx_bndl_mask is enabled per AC, each has 1 bit */
		target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is not
		 * aligned to a block size; otherwise the I/O block
		 * padding will spill into the next credit buffer.
		 */
		target->tx_bndl_mask = 0;
	}
}
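/*
 * Worked example (hypothetical numbers, editor's addition): with a
 * 128-byte SDIO block size and a target credit size of 160 bytes,
 * 160 % 128 != 0, so tx bundling is disabled above; block padding of a
 * bundled message would otherwise spill into the next credit-sized
 * buffer. With a credit size of 256 bytes the check passes and all
 * WMM_NUM_AC bits of tx_bndl_mask stay set, enabling tx bundling on
 * every access category.
 */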
static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc target ready credits %d size %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);

	if (status)
		/*
		 * FIXME: this call doesn't make sense, the caller should
		 * call ath6kl_htc_mbox_cleanup() when it wants to remove HTC.
		 */
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}
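/*
 * Editor's summary of the handshake above: the target's first control
 * message must be an HTC ready message carrying a non-zero credit count
 * and credit size; if the message is long enough to be the extended
 * form it also carries the HTC protocol version and the per-bundle
 * message limit. Only after those are recorded is the pseudo control
 * service connected on ENDPOINT_0.
 */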
 * Start HTC, enable interrupts, and let the target know the
 * host has finished setup.
 */
static int ath6kl_htc_mbox_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	memset(&target->dev->irq_proc_reg, 0,
	       sizeof(target->dev->irq_proc_reg));

	/* Disable interrupts at the chip level */
	ath6kl_hif_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
			   target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate to the target that setup is complete */
	status = htc_setup_tx_complete(target);

	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kl_hif_unmask_intrs(target->dev);

	if (status)
		ath6kl_htc_mbox_stop(target);

	return status;
}
static int ath6kl_htc_reset(struct htc_target *target)
{
	u32 block_size, ctrl_bufsz;
	struct htc_packet *packet;
	int i;

	reset_ep_state(target);

	block_size = target->dev->ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		     (block_size + HTC_HDR_LENGTH) :
		     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
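	/*
	 * Editor's note on the sizing above, with a hypothetical
	 * 512-byte block size that exceeds HTC_MAX_CTRL_MSG_LEN: the
	 * first arm is taken and ctrl_bufsz = 512 + HTC_HDR_LENGTH;
	 * otherwise the buffer is sized for one maximum control message
	 * plus header. Either way a control buffer always holds at
	 * least one full block or one full control message, including
	 * the HTC header.
	 */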
	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			return -ENOMEM;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			return -ENOMEM;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else {
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
		}
	}

	return 0;
}
/* stop HTC: stop interrupt reception and flush all queued buffers */
static void ath6kl_htc_mbox_stop(struct htc_target *target)
{
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation; when this
	 * function returns, all pending HIF I/O has completed and we
	 * can safely flush the queues.
	 */
	ath6kl_hif_mask_intrs(target->dev);

	ath6kl_htc_flush_txep_all(target);

	ath6kl_htc_mbox_flush_rx_buf(target);

	ath6kl_htc_reset(target);
}
static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	int status = 0;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto err_htc_cleanup;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	status = ath6kl_hif_setup(target->dev);
	if (status)
		goto err_htc_cleanup;

	status = ath6kl_htc_reset(target);
	if (status)
		goto err_htc_cleanup;

	return target;

err_htc_cleanup:
	ath6kl_htc_mbox_cleanup(target);

	return NULL;
}
/* cleanup the HTC instance */
static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}
static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
	.create = ath6kl_htc_mbox_create,
	.wait_target = ath6kl_htc_mbox_wait_target,
	.start = ath6kl_htc_mbox_start,
	.conn_service = ath6kl_htc_mbox_conn_service,
	.tx = ath6kl_htc_mbox_tx,
	.stop = ath6kl_htc_mbox_stop,
	.cleanup = ath6kl_htc_mbox_cleanup,
	.flush_txep = ath6kl_htc_mbox_flush_txep,
	.flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
	.activity_changed = ath6kl_htc_mbox_activity_changed,
	.get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_mbox_credit_setup,
};

void ath6kl_htc_mbox_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_mbox_ops;
}
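/*
 * Illustrative sketch (editor's addition, not part of the original
 * driver): after attach, core code is expected to reach HTC only
 * through the ops table. The helper name and abbreviated error
 * handling here are hypothetical.
 */
#if 0
static int example_htc_bringup(struct ath6kl *ar)
{
	int status;

	ath6kl_htc_mbox_attach(ar);

	ar->htc_target = ar->htc_ops->create(ar);
	if (!ar->htc_target)
		return -ENOMEM;

	/* wait for the target's ready message, then start HTC */
	status = ar->htc_ops->wait_target(ar->htc_target);
	if (status)
		return status;

	return ar->htc_ops->start(ar->htc_target);
}
#endif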