drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_msgbuf.c
1 /**
2  * @file definition of host message ring functionality
3  * Provides type definitions and function prototypes used to link the
4  * DHD OS, bus, and protocol modules.
5  *
6  * Copyright (C) 1999-2016, Broadcom Corporation
7  * 
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  * 
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  * 
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $
30  */
31
32
33 #include <typedefs.h>
34 #include <osl.h>
35
36 #include <bcmutils.h>
37 #include <bcmmsgbuf.h>
38 #include <bcmendian.h>
39
40 #include <dngl_stats.h>
41 #include <dhd.h>
42 #include <dhd_proto.h>
43
44 #include <dhd_bus.h>
45
46 #include <dhd_dbg.h>
47 #include <siutils.h>
48
49
50 #include <dhd_flowring.h>
51
52 #include <pcie_core.h>
53 #include <bcmpcie.h>
54 #include <dhd_pcie.h>
55
56 #if defined(DHD_LB)
57 #include <linux/cpu.h>
58 #include <bcm_ring.h>
59 #define DHD_LB_WORKQ_SZ                            (8192)
60 #define DHD_LB_WORKQ_SYNC           (16)
61 #define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
62 #endif /* DHD_LB */
63
64
65 /**
66  * Host configures a soft doorbell for D2H rings by specifying a 32bit host
67  * address where a value must be written. Host may also request interrupt
68  * coalescing on this soft doorbell.
69  * Use Case: Hosts with network processors may register with the dongle the
70  * network processor's thread wakeup register and a value corresponding to the
71  * core/thread context. Dongle will issue a write transaction <address,value>
72  * to the PCIE RC, which the host will need to route to the mapped register
73  * space.
74  */
75 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
76
77 /* Dependency Check */
78 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
79 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
80 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
81
82 #define RETRIES 2               /* # of retries to retrieve matching ioctl response */
83
84 #define DEFAULT_RX_BUFFERS_TO_POST      256
85 #define RXBUFPOST_THRESHOLD                     32
86 #define RX_BUF_BURST                            32 /* Rx buffers for MSDU Data */
87
88 #define DHD_STOP_QUEUE_THRESHOLD        200
89 #define DHD_START_QUEUE_THRESHOLD       100
90
91 #define RX_DMA_OFFSET           8 /* Mem2mem DMA inserts an extra 8 bytes */
92 #define IOCT_RETBUF_SIZE        (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
93 #define FLOWRING_SIZE           (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE)
94
95 /* flags for ioctl pending status */
96 #define MSGBUF_IOCTL_ACK_PENDING        (1<<0)
97 #define MSGBUF_IOCTL_RESP_PENDING       (1<<1)
98
99 #define DMA_ALIGN_LEN           4
100
101 #define DMA_D2H_SCRATCH_BUF_LEN 8
102 #define DMA_XFER_LEN_LIMIT      0x400000
103
104 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ                8192
105
106 #define DHD_FLOWRING_MAX_EVENTBUF_POST                  8
107 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST              8
108
109 #define DHD_PROT_FUNCS  37
110
111 /* Length of buffer in host for bus throughput measurement */
112 #define DHD_BUS_TPUT_BUF_LEN 2048
113
114 #define TXP_FLUSH_NITEMS
115
116 /* optimization to write "n" tx items at a time to ring */
117 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT   48
118
119 #define RING_NAME_MAX_LENGTH            24
120
121
122 struct msgbuf_ring; /* ring context for common and flow rings */
123
124 /**
125  * PCIE D2H DMA Complete Sync Modes
126  *
127  * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
128  * Host system memory. A WAR using one of the following approaches is needed:
129  * 1. SEQNUM: Dongle places a modulo-253 seqnum in the last word of each D2H
130  *    message.
131  * 2. XORCSUM: Dongle computes an XOR checksum over each work item and writes
132  *    it in the last word of the work item. Each work item also carries an
133  *    epoch seqnum (sequence number % 253).
134  * 3. Read Barrier: Dongle does a host memory read access prior to posting an
135  *    interrupt, ensuring that the D2H data transfer indeed completed.
136  * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
137  *    ring contents before the indices.
138  *
139  * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
140  * callback (see dhd_prot_d2h_sync_none) may be bound.
141  *
142  * Dongle advertises host side sync mechanism requirements.
143  */
144 #define PCIE_D2H_SYNC
145
146 #if defined(PCIE_D2H_SYNC)
147 #define PCIE_D2H_SYNC_WAIT_TRIES    (512UL)
148 #define PCIE_D2H_SYNC_NUM_OF_STEPS      (3UL)
149 #define PCIE_D2H_SYNC_DELAY                     (50UL)  /* in terms of usecs */
150
151 /**
152  * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
153  *
154  * On success: return cmn_msg_hdr_t::msg_type
155  * On failure: return 0 (invalid msg_type)
156  */
157 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
158                                 volatile cmn_msg_hdr_t *msg, int msglen);
159 #endif /* PCIE_D2H_SYNC */
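
/*
 * Usage sketch (illustration only, not compiled; the real consumer lives in
 * the D2H message processing path): the bound d2h_sync_cb is invoked on each
 * D2H work item before it is parsed. A return value of MSG_TYPE_INVALID (0)
 * means the DMA never landed within the wait budget and the item is skipped.
 */
#if 0
	uint8 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);
	if (msg_type == MSG_TYPE_INVALID) {
		/* livelock was already logged by the callback; drop this work item */
	}
#endif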
160
161
162 /*
163  * +----------------------------------------------------------------------------
164  *
165  * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
166  * flowids do not.
167  *
168  * Dongle advertises the max H2D rings, as max_sub_queues = 'N', which includes
169  * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
170  *
171  * Here is a sample mapping (based on PCIE Full Dongle Rev5), where
172  *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
173  *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. including the 3 D2H common rings.
174  *
175  *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
176  *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
177  *
178  *  D2H Control  Complete RingId = 2
179  *  D2H Transmit Complete RingId = 3
180  *  D2H Receive  Complete RingId = 4
181  *
182  *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
183  *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
184  *  H2D TxPost   FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
185  *
186  * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
187  * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
188  *
189  * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
190  * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
191  * FlowId values would be in the range [2..133] and the corresponding
192  * RingId values would be in the range [5..136].
193  *
194  * The flowId allocator may choose to allocate FlowIds:
195  *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
196  *   X# of uc flowids in consecutive ranges (per station Id), where X is the
197  *   number of access categories (e.g. 4 uc flowids per station).
198  *
199  * CAUTION:
200  * When DMA indices array feature is used, RingId=5, corresponding to the 0th
201  * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
202  * since the FlowId truly represents the index in the H2D DMA indices array.
203  *
204  * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
205  * will represent the index in the D2H DMA indices array.
206  *
207  * +----------------------------------------------------------------------------
208  */
209
210 /* First TxPost Flowring Id */
211 #define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS
212
213 /* Determine whether a ringid belongs to a TxPost flowring */
214 #define DHD_IS_FLOWRING(ringid) \
215         ((ringid) >= BCMPCIE_COMMON_MSGRINGS)
216
217 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
218 #define DHD_FLOWID_TO_RINGID(flowid) \
219         (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
220
221 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
222 #define DHD_RINGID_TO_FLOWID(ringid) \
223         (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
224
225 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
226  * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
227  * any array of H2D rings.
228  */
229 #define DHD_H2D_RING_OFFSET(ringid) \
230         ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
231
232 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
233  * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
234  * any array of D2H rings.
235  */
236 #define DHD_D2H_RING_OFFSET(ringid) \
237         ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)
238
239 /* Convert a D2H DMA Indices Offset to a RingId */
240 #define DHD_D2H_RINGID(offset) \
241         ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
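
/*
 * Worked example (a non-compiled sketch using the sample Rev5 values from the
 * comment above: BCMPCIE_H2D_COMMON_MSGRINGS = 2, BCMPCIE_COMMON_MSGRINGS = 5).
 */
#if 0
	/* 1st TxPost flowring: FlowId 2 <-> RingId 5 */
	ASSERT(DHD_FLOWID_TO_RINGID(2) == 5);
	ASSERT(DHD_RINGID_TO_FLOWID(5) == 2);

	/* H2D DMA index offsets: common rings use the ringid, flowrings the flowid */
	ASSERT(DHD_H2D_RING_OFFSET(1) == 1); /* H2D RxPost submit ring */
	ASSERT(DHD_H2D_RING_OFFSET(5) == 2); /* 1st TxPost flowring */

	/* D2H DMA index offsets and the reverse mapping */
	ASSERT(DHD_D2H_RING_OFFSET(3) == 1); /* D2H Tx complete ring */
	ASSERT(DHD_D2H_RINGID(1) == 3);
#endif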
242
243
244 #define DHD_DMAH_NULL      ((void*)NULL)
245
246 /*
247  * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
248  * buffer does not occupy the entire cacheline, and another object is placed
249  * following the DMA-able buffer, data corruption may occur if the DMA-able
250  * buffer is DMA'ed into (e.g. D2H direction), when HW cache coherency
251  * is not available.
252  */
253 #if defined(L1_CACHE_BYTES)
254 #define DHD_DMA_PAD        (L1_CACHE_BYTES)
255 #else
256 #define DHD_DMA_PAD        (128)
257 #endif
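
/*
 * Example (hypothetical numbers): with DHD_DMA_PAD = 64 and buf_len = 1000,
 * dhd_dma_buf_alloc() below pads the allocation to 1064 bytes, so the tail of
 * the DMA-able buffer does not share its last cacheline with a neighbouring
 * object.
 */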
258
259 /* Used in loopback tests */
260 typedef struct dhd_dmaxfer {
261         dhd_dma_buf_t srcmem;
262         dhd_dma_buf_t dstmem;
263         uint32        srcdelay;
264         uint32        destdelay;
265         uint32        len;
266         bool          in_progress;
267 } dhd_dmaxfer_t;
268
269 /**
270  * msgbuf_ring : This object manages the host side ring that includes a DMA-able
271  * buffer, the WR and RD indices, ring parameters such as the max number of
272  * items and the length of each item, and other miscellaneous runtime state.
273  * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
274  * H2D TxPost ring as specified in the PCIE FullDongle Spec.
275  * Ring parameters are conveyed to the dongle, which maintains its own peer end
276  * ring state. Depending on whether the DMA Indices feature is supported, the
277  * host will update the WR/RD index in the DMA indices array in host memory or
278  * directly in dongle memory.
279  */
280 typedef struct msgbuf_ring {
281         bool           inited;
282         uint16         idx;       /* ring id */
283         uint16         rd;        /* read index */
284         uint16         curr_rd;   /* read index for debug */
285         uint16         wr;        /* write index */
286         uint16         max_items; /* maximum number of items in ring */
287         uint16         item_len;  /* length of each item in the ring */
288         sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
289         dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
290         uint32         seqnum;    /* next expected item's sequence number */
291 #ifdef TXP_FLUSH_NITEMS
292         void           *start_addr;
293         /* # of messages on ring not yet announced to dongle */
294         uint16         pend_items_count;
295 #endif /* TXP_FLUSH_NITEMS */
296         uchar           name[RING_NAME_MAX_LENGTH];
297 } msgbuf_ring_t;
298
299 #define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
300 #define DHD_RING_END_VA(ring) \
301         ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
302          (((ring)->max_items - 1) * (ring)->item_len))
303
304
305
306 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
307 typedef struct dhd_prot {
308         osl_t *osh;             /* OSL handle */
309         uint16 rxbufpost;
310         uint16 max_rxbufpost;
311         uint16 max_eventbufpost;
312         uint16 max_ioctlrespbufpost;
313         uint16 cur_event_bufs_posted;
314         uint16 cur_ioctlresp_bufs_posted;
315
316         /* Flow control mechanism based on active transmits pending */
317         uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
318         uint16 max_tx_count;
319         uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */
320
321         /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
322         msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
323         msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
324         msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
325         msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
326         msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
327
328         msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
329         dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
330         uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */
331
332         uint32          rx_dataoffset;
333
334         dhd_mb_ring_t   mb_ring_fn;     /* called when dongle needs to be notified of new msg */
335
336         /* ioctl related resources */
337         uint8 ioctl_state;
338         int16 ioctl_status;             /* status returned from dongle */
339         uint16 ioctl_resplen;
340         dhd_ioctl_recieved_status_t ioctl_received;
341         uint curr_ioctl_cmd;
342         dhd_dma_buf_t   retbuf;         /* For holding ioctl response */
343         dhd_dma_buf_t   ioctbuf;        /* For holding ioctl request */
344
345         dhd_dma_buf_t   d2h_dma_scratch_buf;    /* For holding d2h scratch */
346
347         /* DMA-able arrays for holding WR and RD indices */
348         uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
349         dhd_dma_buf_t   h2d_dma_indx_wr_buf;    /* Array of H2D WR indices */
350         dhd_dma_buf_t   h2d_dma_indx_rd_buf;    /* Array of H2D RD indices */
351         dhd_dma_buf_t   d2h_dma_indx_wr_buf;    /* Array of D2H WR indices */
352         dhd_dma_buf_t   d2h_dma_indx_rd_buf;    /* Array of D2H RD indices */
353
354         dhd_dma_buf_t   host_bus_throughput_buf; /* bus throughput measure buffer */
355
356         dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
357         uint32                  flowring_num;
358
359 #if defined(PCIE_D2H_SYNC)
360         d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
361         ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
362         ulong d2h_sync_wait_tot; /* total wait loops */
363 #endif  /* PCIE_D2H_SYNC */
364
365         dhd_dmaxfer_t   dmaxfer; /* for test/DMA loopback */
366
367         uint16          ioctl_seq_no;
368         uint16          data_seq_no;
369         uint16          ioctl_trans_id;
370         void            *pktid_map_handle; /* a pktid maps to a packet and its metadata */
371         bool            metadata_dbg;
372         void            *pktid_map_handle_ioctl;
373
374         /* Applications/utilities can read tx and rx metadata using IOVARs */
375         uint16          rx_metadata_offset;
376         uint16          tx_metadata_offset;
377
378
379 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
380         /* Host's soft doorbell configuration */
381         bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
382 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
383 #if defined(DHD_LB)
384         /* Work Queues to be used by the producer and the consumer, and threshold
385          * when the WRITE index must be synced to consumer's workq
386          */
387 #if defined(DHD_LB_TXC)
388         uint32 tx_compl_prod_sync ____cacheline_aligned;
389         bcm_workq_t tx_compl_prod, tx_compl_cons;
390 #endif /* DHD_LB_TXC */
391 #if defined(DHD_LB_RXC)
392         uint32 rx_compl_prod_sync ____cacheline_aligned;
393         bcm_workq_t rx_compl_prod, rx_compl_cons;
394 #endif /* DHD_LB_RXC */
395 #endif /* DHD_LB */
396 } dhd_prot_t;
397
398 /* Convert a dmaaddr_t to a base_addr with htol operations */
399 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
400
401 /* APIs for managing a DMA-able buffer */
402 static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
403 static int  dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
404 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
405 static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
406
407 /* msgbuf ring management */
408 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
409         const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
410 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
411 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
412 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
413
414 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
415 static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
416 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
417 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
418
419 /* Fetch and Release a flowring msgbuf_ring from flowring  pool */
420 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
421         uint16 flowid);
422 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
423
424 /* Producer: Allocate space in a msgbuf ring */
425 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
426         uint16 nitems, uint16 *alloced, bool exactly_nitems);
427 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
428         uint16 *alloced, bool exactly_nitems);
429
430 /* Consumer: Determine the location where the next message may be consumed */
431 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
432         uint32 *available_len);
433
434 /* Producer (WR index update) or Consumer (RD index update) indication */
435 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
436         void *p, uint16 len);
437 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
438
439 /* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */
440 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
441         dhd_dma_buf_t *dma_buf, uint32 bufsz);
442
443 /* Set/Get a RD or WR index in the array of indices */
444 /* See also: dhd_prot_dma_indx_init() */
445 static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
446         uint16 ringid);
447 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
448
449 /* Locate a packet given a pktid */
450 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
451         bool free_pktid);
452 /* Locate a packet given a PktId and free it. */
453 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
454
455 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
456         void *buf, uint len, uint8 action);
457 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
458         void *buf, uint len, uint8 action);
459 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
460 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
461         void *buf, int ifidx);
462
463 /* Post buffers for Rx, control ioctl response and events */
464 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
465 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
466 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
467 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
468 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
469
470 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
471
472 /* D2H Message handling */
473 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
474
475 /* D2H Message handlers */
476 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
477 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
478 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
479 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
480 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
481 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
482 static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg);
483 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
484
485 /* Loopback test with dongle */
486 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
487 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
488         uint destdelay, dhd_dmaxfer_t *dma);
489 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
490
491 /* Flowring management communication with dongle */
492 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
493 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
494 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
495
496 /* Configure a soft doorbell per D2H ring */
497 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
498 static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg);
499
500 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
501
502 /** callback functions for messages generated by the dongle */
503 #define MSG_TYPE_INVALID 0
504
505 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
506         dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
507         dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
508         dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
509         NULL,
510         dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
511         NULL,
512         dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
513         NULL,
514         dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
515         NULL,
516         dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
517         NULL,
518         dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
519         NULL,
520         dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
521         NULL,
522         dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
523         NULL,
524         dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
525         NULL,
526         dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
527         NULL, /* MSG_TYPE_FLOW_RING_RESUME */
528         NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
529         NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
530         NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
531         NULL, /* MSG_TYPE_INFO_BUF_POST */
532         NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
533         NULL, /* MSG_TYPE_H2D_RING_CREATE */
534         NULL, /* MSG_TYPE_D2H_RING_CREATE */
535         NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
536         NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
537         NULL, /* MSG_TYPE_H2D_RING_CONFIG */
538         NULL, /* MSG_TYPE_D2H_RING_CONFIG */
539         NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
540         dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
541         NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
542         NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */
543 };
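
/*
 * Dispatch sketch (illustration only, not compiled; the real dispatch happens
 * in the D2H message processing path): the msg_type carried in cmn_msg_hdr_t
 * indexes directly into this table, with NULL entries treated as unsupported.
 */
#if 0
	if (msg_type < DHD_PROT_FUNCS && table_lookup[msg_type] != NULL)
		table_lookup[msg_type](dhd, (void *)msg);
#endif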
544
545
546 #ifdef DHD_RX_CHAINING
547
548 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
549         (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
550          !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
551          !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
552          !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
553          ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
554          ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
555          (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \
556          dhd_l2_filter_chainable((dhd), (evh), (ifidx)))
557
558 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
559 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
560 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
561
562 #define DHD_PKT_CTF_MAX_CHAIN_LEN       64
563
564 #endif /* DHD_RX_CHAINING */
565
566 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
567
568 #if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */
569
570 /**
571  * D2H DMA to completion callback handlers. Based on the mode advertised by the
572  * dongle through the PCIE shared region, the appropriate callback will be
573  * registered in the proto layer to be invoked prior to processing any message
574  * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
575  * does not require host participation, then a noop callback handler will be
576  * bound that simply returns the msg_type.
577  */
578 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring,
579                                        uint32 tries, uchar *msg, int msglen);
580 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
581                                       volatile cmn_msg_hdr_t *msg, int msglen);
582 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
583                                        volatile cmn_msg_hdr_t *msg, int msglen);
584 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
585                                     volatile cmn_msg_hdr_t *msg, int msglen);
586 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
587
588 void dhd_prot_collect_memdump(dhd_pub_t *dhd)
589 {
590         DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__));
591 #ifdef DHD_FW_COREDUMP
592         if (dhd->memdump_enabled) {
593                 /* collect core dump */
594                 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
595                 dhd_bus_mem_dump(dhd);
596         }
597 #endif /* DHD_FW_COREDUMP */
598 #ifdef SUPPORT_LINKDOWN_RECOVERY
599 #ifdef CONFIG_ARCH_MSM
600         dhd->bus->no_cfg_restore = 1;
601 #endif /* CONFIG_ARCH_MSM */
602         dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
603         dhd_os_send_hang_message(dhd);
604 #endif /* SUPPORT_LINKDOWN_RECOVERY */
605 }
606
607 /**
608  * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
609  * not completed, a livelock condition occurs. Host will avert this livelock by
610  * dropping this message and moving to the next. This dropped message can lead
611  * to a packet leak, or even something disastrous in the case the dropped
612  * message happens to be a control response.
613  * Here we will log this condition. One may choose to reboot the dongle.
614  *
615  */
616 static void
617 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
618                            uchar *msg, int msglen)
619 {
620         uint32 seqnum = ring->seqnum;
621
622         DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>"
623                 "dma_buf va<%p> msg<%p> curr_rd<%d>\n",
624                 dhd, ring->name, seqnum, seqnum% D2H_EPOCH_MODULO, tries,
625                 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
626                 ring->dma_buf.va, msg, ring->curr_rd));
627         prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
628         dhd_dump_to_kernelog(dhd);
629
630 #ifdef DHD_FW_COREDUMP
631         if (dhd->memdump_enabled) {
632                 /* collect core dump */
633                 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
634                 dhd_bus_mem_dump(dhd);
635         }
636 #endif /* DHD_FW_COREDUMP */
637 #ifdef SUPPORT_LINKDOWN_RECOVERY
638 #ifdef CONFIG_ARCH_MSM
639         dhd->bus->no_cfg_restore = 1;
640 #endif /* CONFIG_ARCH_MSM */
641         dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
642         dhd_os_send_hang_message(dhd);
643 #endif /* SUPPORT_LINKDOWN_RECOVERY */
644 }
645
646 /**
647  * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
648  * mode. Sequence number is always in the last word of a message.
649  */
650 static uint8 BCMFASTPATH
651 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
652                          volatile cmn_msg_hdr_t *msg, int msglen)
653 {
654         uint32 tries;
655         uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
656         int num_words = msglen / sizeof(uint32); /* num of 32bit words */
657         volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
658         dhd_prot_t *prot = dhd->prot;
659         uint32 step = 0;
660         uint32 delay = PCIE_D2H_SYNC_DELAY;
661         uint32 total_tries = 0;
662
663         ASSERT(msglen == ring->item_len);
664
665         BCM_REFERENCE(delay);
666         /*
667          * For retries we have to make some sort of stepper algorithm.
668          * We see that every time when the Dongle comes out of the D3
669          * Cold state, the first D2H mem2mem DMA takes more time to
670          * complete, leading to livelock issues.
671          *
672          * Case 1 - Apart from Host CPU some other bus master is
673          * accessing the DDR port, probably a page close to the ring,
674          * so PCIE does not get a chance to update the memory.
675          * Solution - Increase the number of tries.
676          *
677          * Case 2 - The 50usec delay given by the Host CPU is not
678          * sufficient for the PCIe RC to start its work.
679          * In this case the breathing time of 50usec given by
680          * the Host CPU is not sufficient.
681          * Solution: Increase the delay in a stepper fashion.
682          * This is done to ensure that no unwanted extra
683          * delay is introduced in normal conditions.
684          */
685         for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
686                 for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
687                         uint32 msg_seqnum = *marker;
688                         if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
689                                 ring->seqnum++; /* next expected sequence number */
690                                 goto dma_completed;
691                         }
692
693                         total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
694
695                         if (total_tries > prot->d2h_sync_wait_max)
696                                 prot->d2h_sync_wait_max = total_tries;
697
698                         OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
699                         OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
700 #if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
701                         /* For ARM there is no pause in cpu_relax, so add extra delay */
702                         OSL_DELAY(delay * step);
703 #endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
704                 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
705         } /* for number of steps */
706
707         dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
708
709         ring->seqnum++; /* skip this message ... leak of a pktid */
710         return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
711
712 dma_completed:
713
714         prot->d2h_sync_wait_tot += total_tries;
715         return msg->msg_type;
716 }
717
718 /**
719  * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
720  * mode. The xorcsum is placed in the last word of a message. Dongle will also
721  * place a seqnum in the epoch field of the cmn_msg_hdr.
722  */
723 static uint8 BCMFASTPATH
724 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
725                           volatile cmn_msg_hdr_t *msg, int msglen)
726 {
727         uint32 tries;
728         uint32 prot_checksum = 0; /* computed checksum */
729         int num_words = msglen / sizeof(uint32); /* num of 32bit words */
730         uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
731         dhd_prot_t *prot = dhd->prot;
732         uint32 step = 0;
733         uint32 delay = PCIE_D2H_SYNC_DELAY;
734         uint32 total_tries = 0;
735
736         ASSERT(msglen == ring->item_len);
737
738         BCM_REFERENCE(delay);
739
740         /*
741          * For retries we have to make some sort of stepper algorithm.
742          * We see that every time when the Dongle comes out of the D3
743          * Cold state, the first D2H mem2mem DMA takes more time to
744          * complete, leading to livelock issues.
745          *
746          * Case 1 - Apart from Host CPU some other bus master is
747          * accessing the DDR port, probably a page close to the ring,
748          * so PCIE does not get a chance to update the memory.
749          * Solution - Increase the number of tries.
750          *
751          * Case 2 - The 50usec delay given by the Host CPU is not
752          * sufficient for the PCIe RC to start its work.
753          * In this case the breathing time of 50usec given by
754          * the Host CPU is not sufficient.
755          * Solution: Increase the delay in a stepper fashion.
756          * This is done to ensure that no unwanted extra
757          * delay is introduced in normal conditions.
758          */
759         for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
760                 for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
761                         prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
762                         if (prot_checksum == 0U) { /* checksum is OK */
763                                 if (msg->epoch == ring_seqnum) {
764                                         ring->seqnum++; /* next expected sequence number */
765                                         goto dma_completed;
766                                 }
767                         }
768
769                         total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
770
771                         if (total_tries > prot->d2h_sync_wait_max)
772                                 prot->d2h_sync_wait_max = total_tries;
773
774                         OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
775                         OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
776 #if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
777                         /* For ARM there is no pause in cpu_relax, so add extra delay */
778                         OSL_DELAY(delay * step);
779 #endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
780
781                 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
782         } /* for number of steps */
783
784         dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
785
786         ring->seqnum++; /* skip this message ... leak of a pktid */
787         return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
788
789 dma_completed:
790
791         prot->d2h_sync_wait_tot += total_tries;
792         return msg->msg_type;
793 }
794
795 /**
796  * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the
797  * host need not try to sync. This noop sync handler will be bound when the dongle
798  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
799  */
800 static uint8 BCMFASTPATH
801 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
802                        volatile cmn_msg_hdr_t *msg, int msglen)
803 {
804         return msg->msg_type;
805 }
806
807 /**
808  * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
809  * dongle advertises.
810  */
811 static void
812 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
813 {
814         dhd_prot_t *prot = dhd->prot;
815         prot->d2h_sync_wait_max = 0UL;
816         prot->d2h_sync_wait_tot = 0UL;
817
818         prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
819         prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
820         prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
821
822         if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
823                 prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
824         } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
825                 prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
826         } else {
827                 prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
828         }
829 }
830
831 #endif /* PCIE_D2H_SYNC */
832
833 int INLINE
834 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
835 {
836         /* To synchronize with the previous memory operations call wmb() */
837         OSL_SMP_WMB();
838         dhd->prot->ioctl_received = reason;
839         /* Call another wmb() to make sure the event value gets updated before waking up */
840         OSL_SMP_WMB();
841         dhd_os_ioctl_resp_wake(dhd);
842         return 0;
843 }
844
845 /**
846  * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
847  */
848 static void
849 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
850 {
851         dhd_prot_t *prot = dhd->prot;
852         prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
853         prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
854 }
855
856 /* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
857
858
859 /*
860  * +---------------------------------------------------------------------------+
861  * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
862  * virtual and physical addresses, the buffer length and the DMA handle.
863  * A secdma handler is also included in the dhd_dma_buf object.
864  * +---------------------------------------------------------------------------+
865  */
866
867 static INLINE void
868 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
869 {
870         base_addr->low_addr = htol32(PHYSADDRLO(pa));
871         base_addr->high_addr = htol32(PHYSADDRHI(pa));
872 }
873
874
875 /**
876  * dhd_dma_buf_audit - Perform basic sanity checks on a DHD DMA buffer.
877  */
878 static int
879 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
880 {
881         uint32 base, end; /* dongle uses 32bit ptr arithmetic */
882
883         ASSERT(dma_buf);
884         base = PHYSADDRLO(dma_buf->pa);
885         ASSERT(base);
886         ASSERT(ISALIGNED(base, DMA_ALIGN_LEN));
887         ASSERT(dma_buf->len != 0);
888
889         /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
890         end = (base + dma_buf->len); /* end address */
891
892         if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */
893                 DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
894                         __FUNCTION__, base, dma_buf->len));
895                 return BCME_ERROR;
896         }
897
898         return BCME_OK;
899 }
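
/*
 * Example of the carry-over failure the audit rejects (hypothetical values):
 * base = 0xFFFFF000 and len = 0x2000 give end = 0x100001000, whose low 32
 * bits (0x1000) are smaller than base, so dongle-side 32bit pointer
 * arithmetic over this buffer would wrap.
 */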
900
901 /**
902  * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
903  * Returns BCME_OK=0 on success, or a
904  * non-zero negative error value on failure.
905  */
906 static int
907 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
908 {
909         uint32 dma_pad = 0;
910         osl_t *osh = dhd->osh;
911
912         ASSERT(dma_buf != NULL);
913         ASSERT(dma_buf->va == NULL);
914         ASSERT(dma_buf->len == 0);
915
916         /* Pad the buffer length by one extra cacheline size.
917          * Required for D2H direction.
918          */
919         dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
920         dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
921                 DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
922
923         if (dma_buf->va == NULL) {
924                 DHD_ERROR(("%s: buf_len %d, no memory available\n",
925                         __FUNCTION__, buf_len));
926                 return BCME_NOMEM;
927         }
928
929         dma_buf->len = buf_len; /* not including padded len */
930
931         if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
932                 dhd_dma_buf_free(dhd, dma_buf);
933                 return BCME_ERROR;
934         }
935
936         dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
937
938         return BCME_OK;
939 }
940
941 /**
942  * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
943  */
944 static void
945 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
946 {
947         if ((dma_buf == NULL) || (dma_buf->va == NULL)) {
948                 return;
949         }
950
951         (void)dhd_dma_buf_audit(dhd, dma_buf);
952
953         /* Zero out the entire buffer and cache flush */
954         memset((void*)dma_buf->va, 0, dma_buf->len);
955         OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
956 }
957
958 /**
959  * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
960  * dhd_dma_buf_alloc().
961  */
962 static void
963 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
964 {
965         osl_t *osh = dhd->osh;
966
967         ASSERT(dma_buf);
968
969         if (dma_buf->va == NULL) {
970                 return; /* Allow for free invocation, when alloc failed */
971         }
972
973         /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
974         (void)dhd_dma_buf_audit(dhd, dma_buf);
975
976         /* dma buffer may have been padded at allocation */
977         DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
978                 dma_buf->pa, dma_buf->dmah);
979
980         memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
981 }
982
983 /**
984  * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
985  * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
986  */
987 void
988 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
989         void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
990 {
991         dhd_dma_buf_t *dma_buf;
992         ASSERT(dhd_dma_buf);
993         dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
994         dma_buf->va = va;
995         dma_buf->len = len;
996         dma_buf->pa = pa;
997         dma_buf->dmah = dmah;
998         dma_buf->secdma = secdma;
999
1000         /* Audit user defined configuration */
1001         (void)dhd_dma_buf_audit(dhd, dma_buf);
1002 }
1003
1004 /* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
1005
1006 /*
1007  * +---------------------------------------------------------------------------+
1008  * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1009  * Its main purpose is to save memory on the dongle; it has other uses as well.
1010  * The packet id map also includes storage for some packet parameters that
1011  * may be saved. A native packet pointer along with the parameters may be saved
1012  * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1013  * and the metadata may be retrieved using the previously allocated packet id.
1014  * +---------------------------------------------------------------------------+
1015  */
1016 #define DHD_PCIE_PKTID
1017 #define MAX_PKTID_ITEMS     (3072) /* Maximum number of pktids supported */
1018
1019 /* On Router, the pktptr serves as a pktid. */
1020
1021
1022 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1023 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1024 #endif
1025
1026 /* Enum for marking the buffer color based on usage */
1027 typedef enum dhd_pkttype {
1028         PKTTYPE_DATA_TX = 0,
1029         PKTTYPE_DATA_RX,
1030         PKTTYPE_IOCTL_RX,
1031         PKTTYPE_EVENT_RX,
1032         /* dhd_prot_pkt_free does no check; used when a pktid is reserved but no space is available */
1033         PKTTYPE_NO_CHECK
1034 } dhd_pkttype_t;
1035
1036 #define DHD_PKTID_INVALID               (0U)
1037 #define DHD_IOCTL_REQ_PKTID             (0xFFFE)
1038 #define DHD_FAKE_PKTID                  (0xFACE)
1039
1040 #define DHD_PKTID_FREE_LOCKER           (FALSE)
1041 #define DHD_PKTID_RSV_LOCKER            (TRUE)
1042
1043 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1044
1045 /* Construct a packet id mapping table, returning an opaque map handle */
1046 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index);
1047
1048 /* Destroy a packet id mapping table, freeing all packets active in the table */
1049 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1050
1051 #define PKTID_MAP_HANDLE        (0)
1052 #define PKTID_MAP_HANDLE_IOCTL  (1)
1053
1054 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index))
1055 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
1056
1057 #if defined(DHD_PCIE_PKTID)
1058
1059
1060 /* Determine number of pktids that are available */
1061 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1062
1063 /* Allocate a unique pktid against which a pkt and some metadata is saved */
1064 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1065         void *pkt);
1066 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1067         void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1068         void *dmah, void *secdma, dhd_pkttype_t pkttype);
1069 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1070         void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1071         void *dmah, void *secdma, dhd_pkttype_t pkttype);
1072
1073 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
1074 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1075         uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1076         void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1077
1078 /*
1079  * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1080  *
1081  * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1082  * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1083  *
1084  * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1085  *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1086  */
1087 #if defined(DHD_PKTID_AUDIT_ENABLED)
1088 #define USE_DHD_PKTID_AUDIT_LOCK 1
1089 /* Audit the pktidmap allocator */
1090 /* #define DHD_PKTID_AUDIT_MAP */
1091
1092 /* Audit the pktid during production/consumption of workitems */
1093 #define DHD_PKTID_AUDIT_RING
1094
1095 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1096 #error "Only one of DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be enabled at a time."
1097 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1098
1099 #define DHD_DUPLICATE_ALLOC     1
1100 #define DHD_DUPLICATE_FREE      2
1101 #define DHD_TEST_IS_ALLOC       3
1102 #define DHD_TEST_IS_FREE        4
1103
1104 #ifdef USE_DHD_PKTID_AUDIT_LOCK
1105 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          dhd_os_spin_lock_init(osh)
1106 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  dhd_os_spin_lock_deinit(osh, lock)
1107 #define DHD_PKTID_AUDIT_LOCK(lock)              dhd_os_spin_lock(lock)
1108 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     dhd_os_spin_unlock(lock, flags)
1109 #else
1110 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
1111 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { /* noop */ } while (0)
1112 #define DHD_PKTID_AUDIT_LOCK(lock)              0
1113 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { /* noop */ } while (0)
1114 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1115
1116 #endif /* DHD_PKTID_AUDIT_ENABLED */
1117
1118 /* #define USE_DHD_PKTID_LOCK   1 */
1119
1120 #ifdef USE_DHD_PKTID_LOCK
1121 #define DHD_PKTID_LOCK_INIT(osh)                dhd_os_spin_lock_init(osh)
1122 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        dhd_os_spin_lock_deinit(osh, lock)
1123 #define DHD_PKTID_LOCK(lock)                    dhd_os_spin_lock(lock)
1124 #define DHD_PKTID_UNLOCK(lock, flags)           dhd_os_spin_unlock(lock, flags)
1125 #else
1126 #define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
1127 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        \
1128         do { \
1129                 BCM_REFERENCE(osh); \
1130                 BCM_REFERENCE(lock); \
1131         } while (0)
1132 #define DHD_PKTID_LOCK(lock)                    0
1133 #define DHD_PKTID_UNLOCK(lock, flags)           \
1134         do { \
1135                 BCM_REFERENCE(lock); \
1136                 BCM_REFERENCE(flags); \
1137         } while (0)
1138 #endif /* !USE_DHD_PKTID_LOCK */
1139
1140 /* Packet metadata saved in packet id mapper */
1141
1142 /* A Locker can be in one of 3 states:
1143  * LOCKER_IS_FREE - Locker is free and can be allocated
1144  * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the
1145  *                  locker (buffer address, len, phy addr etc) are populated
1146  *                  with valid values
1147  * LOCKER_IS_RSVD - The locker is reserved for future use, but the values
1148  *                  in the locker are not valid. Especially pkt should be
1149  *                  NULL in this state. When the user wants to re-use the
1150  *                  locker dhd_pktid_map_free can be called with a flag
1151  *                  to reserve the pktid for future use, which will clear
1152  *                  the contents of the locker. When the user calls
1153  *                  dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY
1154  */
1155 typedef enum dhd_locker_state {
1156         LOCKER_IS_FREE,
1157         LOCKER_IS_BUSY,
1158         LOCKER_IS_RSVD
1159 } dhd_locker_state_t;
1160
1161 typedef struct dhd_pktid_item {
1162         dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
1163         uint8         dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
1164         dhd_pkttype_t pkttype;  /* pktlists are maintained based on pkttype */
1165         uint16        len;      /* length of mapped packet's buffer */
1166         void          *pkt;     /* opaque native pointer to a packet */
1167         dmaaddr_t     pa;       /* physical address of mapped packet's buffer */
1168         void          *dmah;    /* handle to OS specific DMA map */
1169         void          *secdma;
1170 } dhd_pktid_item_t;
1171
1172 typedef struct dhd_pktid_map {
1173         uint32      items;    /* total items in map */
1174         uint32      avail;    /* total available items */
1175         int         failures; /* lockers unavailable count */
1176         /* Spinlock to protect dhd_pktid_map in process/tasklet context */
1177         void        *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
1178
1179 #if defined(DHD_PKTID_AUDIT_ENABLED)
1180         void        *pktid_audit_lock;
1181         struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1182 #endif /* DHD_PKTID_AUDIT_ENABLED */
1183
1184         uint32      keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
1185         dhd_pktid_item_t lockers[0];           /* metadata storage */
1186 } dhd_pktid_map_t;
1187
1188 /*
1189  * PktId (Locker) #0 is never allocated and is considered invalid.
1190  *
1191  * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1192  * depleted pktid pool and must not be used by the caller.
1193  *
1194  * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1195  */
1196
1197 #define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
1198 #define DHD_PKIDMAP_ITEMS(items)        (items)
1199 #define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
1200                                         (DHD_PKTID_ITEM_SZ * ((items) + 1)))
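
/*
 * Example: with MAX_PKTID_ITEMS = 3072 the map is allocated as one block of
 * DHD_PKTID_MAP_SZ(3072) = sizeof(dhd_pktid_map_t) +
 * (3072 + 1) * sizeof(dhd_pktid_item_t) bytes, i.e. the fixed header, the
 * keys[] stack and 3073 lockers (locker #0 is never allocated and stands for
 * DHD_PKTID_INVALID).
 */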
1201
1202 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map)  dhd_pktid_map_fini_ioctl((dhd), (map))
1203
1204 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1205 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt)    dhd_pktid_map_reserve((dhd), (map), (pkt))
1206
1207 /* Reuse a previously reserved locker to save packet params */
1208 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1209         dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1210                            (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1211                            (dhd_pkttype_t)(pkttype))
1212
1213 /* Convert a packet to a pktid, and save packet params in locker */
1214 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1215         dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1216                             (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1217                             (dhd_pkttype_t)(pkttype))
1218
1219 /* Convert pktid to a packet, and free the locker */
1220 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1221         dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1222         (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1223         (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1224
1225 /* Convert the pktid to a packet, empty locker, but keep it reserved */
1226 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1227         dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1228         (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1229         (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1230
1231 #define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
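
/*
 * Round-trip sketch (illustration only, not compiled; locals are
 * hypothetical): a native packet is converted to a 32bit pktid when a work
 * item is posted to the dongle, and the pktid carried in the completion is
 * converted back to the packet and its saved DMA metadata.
 */
#if 0
	pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma,
		PKTTYPE_DATA_TX);
	if (pktid == DHD_PKTID_INVALID) {
		/* pktid pool depleted: do not post this work item */
	}

	/* ... later, when the dongle returns the completion ... */
	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
		PKTTYPE_DATA_TX);
#endif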
1232
1233 #if defined(DHD_PKTID_AUDIT_ENABLED)
1234
1235 static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1236         const int test_for, const char *errmsg);
1237
1238 /**
1239 * dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1240 */
1241 static int
1242 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1243         const int test_for, const char *errmsg)
1244 {
1245 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1246
1247         const uint32 max_pktid_items = (MAX_PKTID_ITEMS);
1248         struct bcm_mwbmap *handle;
1249         uint32  flags;
1250         bool ignore_audit;
1251
1252         if (pktid_map == (dhd_pktid_map_t *)NULL) {
1253                 DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1254                 return BCME_OK;
1255         }
1256
1257         flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1258
1259         handle = pktid_map->pktid_audit;
1260         if (handle == (struct bcm_mwbmap *)NULL) {
1261                 DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1262                 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1263                 return BCME_OK;
1264         }
1265
1266         /* Exclude special pktids from audit */
1267         ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1268         if (ignore_audit) {
1269                 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1270                 return BCME_OK;
1271         }
1272
1273         if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) {
1274                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1275                 /* lock is released in "error" */
1276                 goto error;
1277         }
1278
1279         /* Perform audit */
1280         switch (test_for) {
1281                 case DHD_DUPLICATE_ALLOC:
1282                         if (!bcm_mwbmap_isfree(handle, pktid)) {
1283                                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1284                                            errmsg, pktid));
1285                                 goto error;
1286                         }
1287                         bcm_mwbmap_force(handle, pktid);
1288                         break;
1289
1290                 case DHD_DUPLICATE_FREE:
1291                         if (bcm_mwbmap_isfree(handle, pktid)) {
1292                                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1293                                            errmsg, pktid));
1294                                 goto error;
1295                         }
1296                         bcm_mwbmap_free(handle, pktid);
1297                         break;
1298
1299                 case DHD_TEST_IS_ALLOC:
1300                         if (bcm_mwbmap_isfree(handle, pktid)) {
1301                                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1302                                            errmsg, pktid));
1303                                 goto error;
1304                         }
1305                         break;
1306
1307                 case DHD_TEST_IS_FREE:
1308                         if (!bcm_mwbmap_isfree(handle, pktid)) {
1309                                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
1310                                            errmsg, pktid));
1311                                 goto error;
1312                         }
1313                         break;
1314
1315                 default:
1316                         goto error;
1317         }
1318
1319         DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1320         return BCME_OK;
1321
1322 error:
1323
1324         DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1325         /* May insert any trap mechanism here ! */
1326         dhd_pktid_audit_fail_cb(dhd);
1327
1328         return BCME_ERROR;
1329 }
1330
1331 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
1332         dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
1333
1334 #endif /* DHD_PKTID_AUDIT_ENABLED */
1335
1336 /* +------------------  End of PCIE DHD PKTID AUDIT ------------------------+ */
1337
1338
1339 /**
1340  * +---------------------------------------------------------------------------+
1341  * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
1342  *
1343  * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
1344  *
1345  * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
1346  * packet id is returned. This unique packet id may be used to retrieve the
1347  * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
1348  * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
1349  * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
1350  *
1351  * Implementation Note:
1352  * Convert this into a <key,locker> abstraction and place into bcmutils !
1353  * Locker abstraction should treat contents as opaque storage, and a
1354  * callback should be registered to handle busy lockers on destructor.
1355  *
1356  * +---------------------------------------------------------------------------+
1357  */
1358
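/*
 * Illustrative sketch (not part of the driver): how a caller might exercise
 * the mapper through the wrapper macros defined earlier, honouring the
 * DHD_PKTID_INVALID contract described above. The locals (pkt, pa, len,
 * dmah, secdma) are placeholders for values produced by the caller's DMA
 * mapping step.
 */
#if 0 /* example only */
{
        uint32 pktid;

        pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX,
                dmah, secdma, PKTTYPE_DATA_TX);
        if (pktid == DHD_PKTID_INVALID) {
                return BCME_NORESOURCE; /* pktid pool depleted; do not post */
        }

        /* ... post a work item carrying pktid to the dongle ... */

        /* on completion, recover the packet and release the locker */
        pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
                PKTTYPE_DATA_TX);
}
#endif /* example only */
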
1359 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
1360
1361 static dhd_pktid_map_handle_t *
1362 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
1363 {
1364         void *osh;
1365         uint32 nkey;
1366         dhd_pktid_map_t *map;
1367         uint32 dhd_pktid_map_sz;
1368         uint32 map_items;
1369 #ifdef DHD_USE_STATIC_PKTIDMAP
1370         uint32 section;
1371 #endif /* DHD_USE_STATIC_PKTIDMAP */
1372         osh = dhd->osh;
1373
1374         ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
1375         dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
1376
1377 #ifdef DHD_USE_STATIC_PKTIDMAP
1378         if (index == PKTID_MAP_HANDLE) {
1379                 section = DHD_PREALLOC_PKTID_MAP;
1380         } else {
1381                 section = DHD_PREALLOC_PKTID_MAP_IOCTL;
1382         }
1383
1384         map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz);
1385 #else
1386         map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz);
1387 #endif /* DHD_USE_STATIC_PKTIDMAP */
1388
1389         if (map == NULL) {
1390                 DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
1391                         __FUNCTION__, __LINE__, dhd_pktid_map_sz));
1392                 goto error;
1393         }
1394
1395         bzero(map, dhd_pktid_map_sz);
1396
1397         /* Initialize the lock that protects this structure */
1398         map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
1399         if (map->pktid_lock == NULL) {
1400                 DHD_ERROR(("%s:%d: Lock init failed\n", __FUNCTION__, __LINE__));
1401                 goto error;
1402         }
1403
1404         map->items = num_items;
1405         map->avail = num_items;
1406
1407         map_items = DHD_PKIDMAP_ITEMS(map->items);
1408
1409 #if defined(DHD_PKTID_AUDIT_ENABLED)
1410         /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
1411         map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
1412         if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
1413                 DHD_ERROR(("%s:%d: pktid_audit init failed\n", __FUNCTION__, __LINE__));
1414                 goto error;
1415         } else {
1416                 DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
1417                         __FUNCTION__, __LINE__, map_items + 1));
1418         }
1419
1420         map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
1421
1422 #endif /* DHD_PKTID_AUDIT_ENABLED */
1423
1424         for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
1425                 map->keys[nkey] = nkey; /* populate with unique keys */
1426                 map->lockers[nkey].state = LOCKER_IS_FREE;
1427                 map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
1428                 map->lockers[nkey].len   = 0;
1429         }
1430
1431         /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */
1432         map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY;
1433         map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
1434         map->lockers[DHD_PKTID_INVALID].len   = 0;
1435
1436 #if defined(DHD_PKTID_AUDIT_ENABLED)
1437         /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
1438         bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
1439 #endif /* DHD_PKTID_AUDIT_ENABLED */
1440
1441         return (dhd_pktid_map_handle_t *)map; /* opaque handle */
1442
1443 error:
1444
1445         if (map) {
1446
1447 #if defined(DHD_PKTID_AUDIT_ENABLED)
1448                 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1449                         bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1450                         map->pktid_audit = (struct bcm_mwbmap *)NULL;
1451                         if (map->pktid_audit_lock)
1452                                 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1453                 }
1454 #endif /* DHD_PKTID_AUDIT_ENABLED */
1455
1456                 if (map->pktid_lock)
1457                         DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1458
1459                 MFREE(osh, map, dhd_pktid_map_sz);
1460         }
1461
1462         return (dhd_pktid_map_handle_t *)NULL;
1463 }
1464
1465 /**
1466  * Retrieve all allocated keys and free all <numbered_key, locker>.
1467  * Freeing implies: unmapping the buffers and freeing the native packet.
1468  * This could have been a callback registered with the pktid mapper.
1469  */
1470
1471 static void
1472 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1473 {
1474         void *osh;
1475         uint32 nkey;
1476         dhd_pktid_map_t *map;
1477         uint32 dhd_pktid_map_sz;
1478         dhd_pktid_item_t *locker;
1479         uint32 map_items;
1480         uint32 flags;
1481
1482         if (handle == NULL) {
1483                 return;
1484         }
1485
1486         map = (dhd_pktid_map_t *)handle;
1487         flags =  DHD_PKTID_LOCK(map->pktid_lock);
1488         osh = dhd->osh;
1489
1490         dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1491
1492         nkey = 1; /* skip reserved KEY #0, and start from 1 */
1493         locker = &map->lockers[nkey];
1494
1495         map_items = DHD_PKIDMAP_ITEMS(map->items);
1496
1497         for (; nkey <= map_items; nkey++, locker++) {
1498
1499                 if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
1500
1501                         locker->state = LOCKER_IS_FREE; /* force open the locker */
1502
1503 #if defined(DHD_PKTID_AUDIT_ENABLED)
1504                         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1505 #endif /* DHD_PKTID_AUDIT_ENABLED */
1506
1507                         {   /* This could be a callback registered with dhd_pktid_map */
1508                                 DMA_UNMAP(osh, locker->pa, locker->len,
1509                                         locker->dir, 0, DHD_DMAH_NULL);
1510                                 dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
1511                                         locker->pkttype, TRUE);
1512                         }
1513                 }
1514 #if defined(DHD_PKTID_AUDIT_ENABLED)
1515                 else {
1516                         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1517                 }
1518 #endif /* DHD_PKTID_AUDIT_ENABLED */
1519
1520                 locker->pkt = NULL; /* clear saved pkt */
1521                 locker->len = 0;
1522         }
1523
1524 #if defined(DHD_PKTID_AUDIT_ENABLED)
1525         if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1526                 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1527                 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1528                 if (map->pktid_audit_lock) {
1529                         DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1530                 }
1531         }
1532 #endif /* DHD_PKTID_AUDIT_ENABLED */
1533
1534         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1535         DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1536
1537 #ifdef DHD_USE_STATIC_PKTIDMAP
1538         DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
1539 #else
1540         MFREE(osh, handle, dhd_pktid_map_sz);
1541 #endif /* DHD_USE_STATIC_PKTIDMAP */
1542 }
1543
1544 #ifdef IOCTLRESP_USE_CONSTMEM
1545 /** Called in detach scenario. Releasing IOCTL buffers. */
1546 static void
1547 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1548 {
1549         uint32 nkey;
1550         dhd_pktid_map_t *map;
1551         uint32 dhd_pktid_map_sz;
1552         dhd_pktid_item_t *locker;
1553         uint32 map_items;
1554         uint32 flags;
1555         osl_t *osh = dhd->osh;
1556
1557         if (handle == NULL) {
1558                 return;
1559         }
1560
1561         map = (dhd_pktid_map_t *)handle;
1562         flags = DHD_PKTID_LOCK(map->pktid_lock);
1563
1564         dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1565
1566         nkey = 1; /* skip reserved KEY #0, and start from 1 */
1567         locker = &map->lockers[nkey];
1568
1569         map_items = DHD_PKIDMAP_ITEMS(map->items);
1570
1571         for (; nkey <= map_items; nkey++, locker++) {
1572
1573                 if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
1574
1575                         locker->state = LOCKER_IS_FREE; /* force open the locker */
1576
1577 #if defined(DHD_PKTID_AUDIT_ENABLED)
1578                         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1579 #endif /* DHD_PKTID_AUDIT_ENABLED */
1580
1581                         {
1582                                 dhd_dma_buf_t retbuf;
1583                                 retbuf.va = locker->pkt;
1584                                 retbuf.len = locker->len;
1585                                 retbuf.pa = locker->pa;
1586                                 retbuf.dmah = locker->dmah;
1587                                 retbuf.secdma = locker->secdma;
1588
1589                                 /* This could be a callback registered with dhd_pktid_map */
1590                                 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1591                                 free_ioctl_return_buffer(dhd, &retbuf);
1592                                 flags = DHD_PKTID_LOCK(map->pktid_lock);
1593                         }
1594                 }
1595 #if defined(DHD_PKTID_AUDIT_ENABLED)
1596                 else {
1597                         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1598                 }
1599 #endif /* DHD_PKTID_AUDIT_ENABLED */
1600
1601                 locker->pkt = NULL; /* clear saved pkt */
1602                 locker->len = 0;
1603         }
1604
1605 #if defined(DHD_PKTID_AUDIT_ENABLED)
1606         if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1607                 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1608                 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1609                 if (map->pktid_audit_lock) {
1610                         DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1611                 }
1612         }
1613 #endif /* DHD_PKTID_AUDIT_ENABLED */
1614
1615         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1616         DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1617
1618 #ifdef DHD_USE_STATIC_PKTIDMAP
1619         DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
1620 #else
1621         MFREE(osh, handle, dhd_pktid_map_sz);
1622 #endif /* DHD_USE_STATIC_PKTIDMAP */
1623 }
1624 #endif /* IOCTLRESP_USE_CONSTMEM */
1625
1626 /** Get the pktid free count */
1627 static INLINE uint32 BCMFASTPATH
1628 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
1629 {
1630         dhd_pktid_map_t *map;
1631         uint32  flags;
1632         uint32  avail;
1633
1634         ASSERT(handle != NULL);
1635         map = (dhd_pktid_map_t *)handle;
1636
1637         flags = DHD_PKTID_LOCK(map->pktid_lock);
1638         avail = map->avail;
1639         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1640
1641         return avail;
1642 }
1643
1644 /**
1645  * Allocate locker, save pkt contents, and return the locker's numbered key.
1646  * dhd_pktid_map_alloc() is not reentrant; serializing access is the caller's responsibility.
1647  * Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
1648  * implying a depleted pool of pktids.
1649  */
1650
1651 static INLINE uint32
1652 __dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
1653 {
1654         uint32 nkey;
1655         dhd_pktid_map_t *map;
1656         dhd_pktid_item_t *locker;
1657
1658         ASSERT(handle != NULL);
1659         map = (dhd_pktid_map_t *)handle;
1660
1661         if (map->avail <= 0) { /* no more pktids to allocate */
1662                 map->failures++;
1663                 DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
1664                 return DHD_PKTID_INVALID; /* failed alloc request */
1665         }
1666
1667         ASSERT(map->avail <= map->items);
1668         nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
1669         locker = &map->lockers[nkey]; /* save packet metadata in locker */
1670         map->avail--;
1671         locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
1672         locker->len = 0;
1673         locker->state = LOCKER_IS_BUSY; /* reserve this locker */
1674
1675 #if defined(DHD_PKTID_AUDIT_MAP)
1676         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
1677 #endif /* DHD_PKTID_AUDIT_MAP */
1678
1679         ASSERT(nkey != DHD_PKTID_INVALID);
1680         return nkey; /* return locker's numbered key */
1681 }
1682
1683
1684 /**
1685  * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
1686  * yet populated. Invoke the pktid save api to populate the packet parameters
1687  * into the locker.
1688  * Wrapper that takes the required lock when called directly.
1689  */
1690 static INLINE uint32
1691 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
1692 {
1693         dhd_pktid_map_t *map;
1694         uint32 flags;
1695         uint32 ret;
1696
1697         ASSERT(handle != NULL);
1698         map = (dhd_pktid_map_t *)handle;
1699         flags = DHD_PKTID_LOCK(map->pktid_lock);
1700         ret = __dhd_pktid_map_reserve(dhd, handle, pkt);
1701         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1702
1703         return ret;
1704 }
1705
1706 static INLINE void
1707 __dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1708         uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1709         dhd_pkttype_t pkttype)
1710 {
1711         dhd_pktid_map_t *map;
1712         dhd_pktid_item_t *locker;
1713
1714         ASSERT(handle != NULL);
1715         map = (dhd_pktid_map_t *)handle;
1716
1717         ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
1718
1719         locker = &map->lockers[nkey];
1720
1721         ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
1722                 ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
1723
1724 #if defined(DHD_PKTID_AUDIT_MAP)
1725         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* a priori reservation */
1726 #endif /* DHD_PKTID_AUDIT_MAP */
1727
1728         /* store contents in locker */
1729         locker->dir = dir;
1730         locker->pa = pa;
1731         locker->len = (uint16)len; /* 16bit len */
1732         locker->dmah = dmah; /* dma handle */
1733         locker->secdma = secdma;
1734         locker->pkttype = pkttype;
1735         locker->pkt = pkt;
1736         locker->state = LOCKER_IS_BUSY; /* make this locker busy */
1737 }
1738
1739 /**
1740  * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding
1741  * to a previously reserved unique numbered key.
1742  * Wrapper that takes the required lock when called directly.
1743  */
1744 static INLINE void
1745 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1746         uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1747         dhd_pkttype_t pkttype)
1748 {
1749         dhd_pktid_map_t *map;
1750         uint32 flags;
1751
1752         ASSERT(handle != NULL);
1753         map = (dhd_pktid_map_t *)handle;
1754         flags = DHD_PKTID_LOCK(map->pktid_lock);
1755         __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
1756                 dir, dmah, secdma, pkttype);
1757         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1758 }
1759
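/*
 * Illustrative sketch (not part of the driver): the two-step pattern the
 * reserve/save pair above enables -- reserve a key first, then populate the
 * locker once the DMA mapping parameters are known. The locals and the
 * PKTTYPE_DATA_RX choice are placeholders, not a claim about which driver
 * path uses this sequence.
 */
#if 0 /* example only */
{
        uint32 pktid;

        pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt);
        if (pktid == DHD_PKTID_INVALID) {
                return BCME_NORESOURCE;
        }

        /* ... DMA-map the buffer, producing pa, len and dmah ... */

        DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, pktid, pa, len, DMA_RX,
                dmah, secdma, PKTTYPE_DATA_RX);
}
#endif /* example only */
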
1760 /**
1761  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
1762  * contents into the corresponding locker. Return the numbered key.
1763  */
1764 static uint32 BCMFASTPATH
1765 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1766         dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1767         dhd_pkttype_t pkttype)
1768 {
1769         uint32 nkey;
1770         uint32 flags;
1771         dhd_pktid_map_t *map;
1772
1773         ASSERT(handle != NULL);
1774         map = (dhd_pktid_map_t *)handle;
1775
1776         flags = DHD_PKTID_LOCK(map->pktid_lock);
1777
1778         nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
1779         if (nkey != DHD_PKTID_INVALID) {
1780                 __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
1781                         len, dir, dmah, secdma, pkttype);
1782 #if defined(DHD_PKTID_AUDIT_MAP)
1783                 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* a priori reservation */
1784 #endif /* DHD_PKTID_AUDIT_MAP */
1785         }
1786
1787         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1788
1789         return nkey;
1790 }
1791
1792 /**
1793  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
1794  * dhd_pktid_map_free() is not reentrant; serializing access is the caller's responsibility.
1795  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
1796  * value. Only a previously allocated pktid may be freed.
1797  */
1798 static void * BCMFASTPATH
1799 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
1800         dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
1801         dhd_pkttype_t pkttype, bool rsv_locker)
1802 {
1803         dhd_pktid_map_t *map;
1804         dhd_pktid_item_t *locker;
1805         void * pkt;
1806         uint32 flags;
1807
1808         ASSERT(handle != NULL);
1809
1810         map = (dhd_pktid_map_t *)handle;
1811
1812         flags = DHD_PKTID_LOCK(map->pktid_lock);
1813
1814         ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
1815
1816         locker = &map->lockers[nkey];
1817
1818 #if defined(DHD_PKTID_AUDIT_MAP)
1819         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
1820 #endif /* DHD_PKTID_AUDIT_MAP */
1821
1822         if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
1823                 DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
1824                         __FUNCTION__, __LINE__, nkey));
1825                 ASSERT(locker->state != LOCKER_IS_FREE);
1826
1827                 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1828                 return NULL;
1829         }
1830
1831         /* Check the colour of the buffer, i.e. a buffer posted for TX
1832          * must be freed on TX completion. Similarly, a buffer posted for
1833          * IOCTL must be freed on IOCTL completion, etc.
1834          */
1835         if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
1836
1837                 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1838
1839                 DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
1840                         __FUNCTION__, __LINE__, nkey));
1841                 ASSERT(locker->pkttype == pkttype);
1842
1843                 return NULL;
1844         }
1845
1846         if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
1847                 map->avail++;
1848                 map->keys[map->avail] = nkey; /* make this numbered key available */
1849                 locker->state = LOCKER_IS_FREE; /* open and free Locker */
1850         } else {
1851                 /* pktid will be reused, but the locker does not have a valid pkt */
1852                 locker->state = LOCKER_IS_RSVD;
1853         }
1854
1855 #if defined(DHD_PKTID_AUDIT_MAP)
1856         DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1857 #endif /* DHD_PKTID_AUDIT_MAP */
1858
1859         *pa = locker->pa; /* return contents of locker */
1860         *len = (uint32)locker->len;
1861         *dmah = locker->dmah;
1862         *secdma = locker->secdma;
1863
1864         pkt = locker->pkt;
1865         locker->pkt = NULL; /* Clear pkt */
1866         locker->len = 0;
1867
1868         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1869         return pkt;
1870 }
1871
1872 #else /* ! DHD_PCIE_PKTID */
1873
1874
1875 typedef struct pktlist {
1876         PKT_LIST *tx_pkt_list;          /* list for tx packets */
1877         PKT_LIST *rx_pkt_list;          /* list for rx packets */
1878         PKT_LIST *ctrl_pkt_list;        /* list for ioctl/event buf post */
1879 } pktlists_t;
1880
1881 /*
1882  * Given that each workitem only carries a 32bit pktid, only 32bit hosts can
1883  * use a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
1884  *
1885  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
1886  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
1887  *   a lock.
1888  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
1889  */
1890 #define DHD_PKTID32(pktptr32)   ((uint32)(pktptr32))
1891 #define DHD_PKTPTR32(pktid32)   ((void *)(pktid32))
1892
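/*
 * Illustrative sketch (not part of the driver): on a 32bit host the mapping
 * is just a cast in each direction, so no lookup table, lock or failure path
 * is needed.
 */
#if 0 /* example only */
{
        uint32 pktid = DHD_PKTID32(pkt);        /* pointer -> id */
        void *pktptr = DHD_PKTPTR32(pktid);     /* id -> pointer, same value */
}
#endif /* example only */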
1893
1894 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
1895         dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
1896         dhd_pkttype_t pkttype);
1897 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
1898         dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
1899         dhd_pkttype_t pkttype);
1900
1901 static dhd_pktid_map_handle_t *
1902 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
1903 {
1904         osl_t *osh = dhd->osh;
1905         pktlists_t *handle = NULL;
1906
1907         if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
1908                 DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
1909                            __FUNCTION__, __LINE__, sizeof(pktlists_t)));
1910                 goto error_done;
1911         }
1912
1913         if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
1914                 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
1915                            __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
1916                 goto error;
1917         }
1918
1919         if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
1920                 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
1921                            __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
1922                 goto error;
1923         }
1924
1925         if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
1926                 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
1927                            __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
1928                 goto error;
1929         }
1930
1931         PKTLIST_INIT(handle->tx_pkt_list);
1932         PKTLIST_INIT(handle->rx_pkt_list);
1933         PKTLIST_INIT(handle->ctrl_pkt_list);
1934
1935         return (dhd_pktid_map_handle_t *) handle;
1936
1937 error:
1938         if (handle->ctrl_pkt_list) {
1939                 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
1940         }
1941
1942         if (handle->rx_pkt_list) {
1943                 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
1944         }
1945
1946         if (handle->tx_pkt_list) {
1947                 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
1948         }
1949
1950         if (handle) {
1951                 MFREE(osh, handle, sizeof(pktlists_t));
1952         }
1953
1954 error_done:
1955         return (dhd_pktid_map_handle_t *)NULL;
1956 }
1957
1958 static void
1959 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
1960 {
1961         osl_t *osh = dhd->osh;
1962         pktlists_t *handle = (pktlists_t *) map;
1963
1964         ASSERT(handle != NULL);
1965         if (handle == (pktlists_t *)NULL) {
1966                 return;
1967         }
1968
1969         if (handle->ctrl_pkt_list) {
1970                 PKTLIST_FINI(handle->ctrl_pkt_list);
1971                 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
1972         }
1973
1974         if (handle->rx_pkt_list) {
1975                 PKTLIST_FINI(handle->rx_pkt_list);
1976                 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
1977         }
1978
1979         if (handle->tx_pkt_list) {
1980                 PKTLIST_FINI(handle->tx_pkt_list);
1981                 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
1982         }
1983
1984         if (handle) {
1985                 MFREE(osh, handle, sizeof(pktlists_t));
1986         }
1987 }
1988
1989 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
1990 static INLINE uint32
1991 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
1992         dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
1993         dhd_pkttype_t pkttype)
1994 {
1995         pktlists_t *handle = (pktlists_t *) map;
1996         ASSERT(pktptr32 != NULL);
1997         DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
1998         DHD_PKT_SET_DMAH(pktptr32, dmah);
1999         DHD_PKT_SET_PA(pktptr32, pa);
2000         DHD_PKT_SET_SECDMA(pktptr32, secdma);
2001
2002         if (pkttype == PKTTYPE_DATA_TX) {
2003                 PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
2004         } else if (pkttype == PKTTYPE_DATA_RX) {
2005                 PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
2006         } else {
2007                 PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
2008         }
2009
2010         return DHD_PKTID32(pktptr32);
2011 }
2012
2013 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2014 static INLINE void *
2015 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2016         dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2017         dhd_pkttype_t pkttype)
2018 {
2019         pktlists_t *handle = (pktlists_t *) map;
2020         void *pktptr32;
2021
2022         ASSERT(pktid32 != 0U);
2023         pktptr32 = DHD_PKTPTR32(pktid32);
2024         *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2025         *dmah = DHD_PKT_GET_DMAH(pktptr32);
2026         *pa = DHD_PKT_GET_PA(pktptr32);
2027         *secdma = DHD_PKT_GET_SECDMA(pktptr32);
2028
2029         if (pkttype == PKTTYPE_DATA_TX) {
2030                 PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
2031         } else if (pkttype == PKTTYPE_DATA_RX) {
2032                 PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
2033         } else {
2034                 PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
2035         }
2036
2037         return pktptr32;
2038 }
2039
2040 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt)  DHD_PKTID32(pkt)
2041
2042 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2043         ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2044            dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2045                            (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2046         })
2047
2048 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2049         ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2050            dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2051                            (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2052         })
2053
2054 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2055         ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);  \
2056                 dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2057                                 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2058                                 (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2059         })
2060
2061 #define DHD_PKTID_AVAIL(map)  (~0)
2062
2063 #endif /* ! DHD_PCIE_PKTID */
2064
2065 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
2066
2067
2068 /**
2069  * The PCIE FD protocol layer is constructed in two phases:
2070  *    Phase 1. dhd_prot_attach()
2071  *    Phase 2. dhd_prot_init()
2072  *
2073  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2074  * All Common rings are also attached (msgbuf_ring_t objects are allocated
2075  * with DMA-able buffers).
2076  * All dhd_dma_buf_t objects are also allocated here.
2077  *
2078  * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2079  * initialization of objects that requires information advertised by the dongle
2080  * cannot be performed here.
2081  * E.g. the number of TxPost flowrings is not known at this point, nor do we
2082  * know which form of D2H DMA sync mechanism is advertised by the dongle, or
2083  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2084  * rings (common + flow).
2085  *
2086  * dhd_prot_init() is invoked after the bus layer has fetched the information
2087  * advertised by the dongle in the pcie_shared_t.
2088  */
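
/*
 * Illustrative sketch (not part of the driver): the two-phase bring-up
 * described above, as the bus layer might sequence it. The steps between the
 * two calls are placeholders; only dhd_prot_attach() and dhd_prot_init() are
 * real entry points here.
 */
#if 0 /* example only */
{
        if (dhd_prot_attach(dhd) != BCME_OK) {
                return BCME_NOMEM;      /* phase 1: allocate prot, common rings, dma bufs */
        }

        /* ... download firmware, read the dongle's pcie_shared_t ... */

        if (dhd_prot_init(dhd) != BCME_OK) {
                return BCME_ERROR;      /* phase 2: apply dongle-advertised configuration */
        }
}
#endif /* example only */
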
2089 int
2090 dhd_prot_attach(dhd_pub_t *dhd)
2091 {
2092         osl_t *osh = dhd->osh;
2093         dhd_prot_t *prot;
2094
2095         /* Allocate prot structure */
2096         if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2097                 sizeof(dhd_prot_t)))) {
2098                 DHD_ERROR(("%s: dhd_prot_t allocation failed\n", __FUNCTION__));
2099                 goto fail;
2100         }
2101         memset(prot, 0, sizeof(*prot));
2102
2103         prot->osh = osh;
2104         dhd->prot = prot;
2105
2106         /* Is DMA'ing of ring updates (WR/RD indices) supported? FALSE by default */
2107         dhd->dma_d2h_ring_upd_support = FALSE;
2108         dhd->dma_h2d_ring_upd_support = FALSE;
2109
2110         /* Common Ring Allocations */
2111
2112         /* Ring  0: H2D Control Submission */
2113         if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2114                 H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2115                 BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2116                 DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2117                         __FUNCTION__));
2118                 goto fail;
2119         }
2120
2121         /* Ring  1: H2D Receive Buffer Post */
2122         if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2123                 H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2124                 BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2125                 DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2126                         __FUNCTION__));
2127                 goto fail;
2128         }
2129
2130         /* Ring  2: D2H Control Completion */
2131         if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2132                 D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2133                 BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2134                 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2135                         __FUNCTION__));
2136                 goto fail;
2137         }
2138
2139         /* Ring  3: D2H Transmit Complete */
2140         if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2141                 D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2142                 BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2143                 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2144                         __FUNCTION__));
2145                 goto fail;
2146
2147         }
2148
2149         /* Ring  4: D2H Receive Complete */
2150         if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2151                 D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2152                 BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2153                 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2154                         __FUNCTION__));
2155                 goto fail;
2156
2157         }
2158
2159         /*
2160          * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2161          * buffers for flowrings will be instantiated in dhd_prot_init().
2162          * See dhd_prot_flowrings_pool_attach().
2163          */
2164         /* ioctl response buffer */
2165         if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2166                 goto fail;
2167         }
2168
2169         /* IOCTL request buffer */
2170         if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2171                 goto fail;
2172         }
2173
2174         /* Scratch buffer for dma rx offset */
2175         if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
2176                 goto fail;
2177         }
2178
2179         /* scratch buffer bus throughput measurement */
2180         if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2181                 goto fail;
2182         }
2183
2184 #ifdef DHD_RX_CHAINING
2185         dhd_rxchain_reset(&prot->rxchain);
2186 #endif
2187
2188 #if defined(DHD_LB)
2189
2190            /* Initialize the work queues to be used by the Load Balancing logic */
2191 #if defined(DHD_LB_TXC)
2192         {
2193                 void *buffer;
2194                 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2195                 bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
2196                         buffer, DHD_LB_WORKQ_SZ);
2197                 prot->tx_compl_prod_sync = 0;
2198                 DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
2199                         __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2200         }
2201 #endif /* DHD_LB_TXC */
2202
2203 #if defined(DHD_LB_RXC)
2204         {
2205                 void *buffer;
2206                 buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
2207                 bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
2208                         buffer, DHD_LB_WORKQ_SZ);
2209                 prot->rx_compl_prod_sync = 0;
2210                 DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
2211                         __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2212         }
2213 #endif /* DHD_LB_RXC */
2214
2215 #endif /* DHD_LB */
2216
2217         return BCME_OK;
2218
2219 fail:
2220
2221 #ifndef CONFIG_DHD_USE_STATIC_BUF
2222         if (prot != NULL) {
2223                 dhd_prot_detach(dhd);
2224         }
2225 #endif /* CONFIG_DHD_USE_STATIC_BUF */
2226
2227         return BCME_NOMEM;
2228 } /* dhd_prot_attach */
2229
2230
2231 /**
2232  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
2233  * completed its initialization of the pcie_shared structure, we may fetch the
2234  * dongle-advertised features and adjust the protocol layer accordingly.
2235  *
2236  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
2237  */
2238 int
2239 dhd_prot_init(dhd_pub_t *dhd)
2240 {
2241         sh_addr_t base_addr;
2242         dhd_prot_t *prot = dhd->prot;
2243
2244         /* PKTID handle INIT */
2245         if (prot->pktid_map_handle != NULL) {
2246                 DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__));
2247                 ASSERT(0);
2248                 return BCME_ERROR;
2249         }
2250
2251 #ifdef IOCTLRESP_USE_CONSTMEM
2252         if (prot->pktid_map_handle_ioctl != NULL) {
2253                 DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__));
2254                 ASSERT(0);
2255                 return BCME_ERROR;
2256         }
2257 #endif /* IOCTLRESP_USE_CONSTMEM */
2258
2259         prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
2260         if (prot->pktid_map_handle == NULL) {
2261                 DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__));
2262                 ASSERT(0);
2263                 return BCME_NOMEM;
2264         }
2265
2266 #ifdef IOCTLRESP_USE_CONSTMEM
2267         prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2268                 DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL);
2269         if (prot->pktid_map_handle_ioctl == NULL) {
2270                 DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__));
2271                 ASSERT(0);
2272                 return BCME_NOMEM;
2273         }
2274 #endif /* IOCTLRESP_USE_CONSTMEM */
2275
2276         /* Max pkts in ring */
2277         prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
2278
2279         DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
2280
2281         /* Read max rx packets supported by dongle */
2282         dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
2283         if (prot->max_rxbufpost == 0) {
2284                 /* This would happen if the dongle firmware is not */
2285                 /* using the latest shared structure template */
2286                 prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
2287         }
2288         DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
2289
2290         /* Initialize fields individually; a bzero() here would blow away the dma pointers. */
2291         prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
2292         prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
2293
2294         prot->cur_ioctlresp_bufs_posted = 0;
2295         prot->active_tx_count = 0;
2296         prot->data_seq_no = 0;
2297         prot->ioctl_seq_no = 0;
2298         prot->rxbufpost = 0;
2299         prot->cur_event_bufs_posted = 0;
2300         prot->ioctl_state = 0;
2301         prot->curr_ioctl_cmd = 0;
2302         prot->ioctl_received = IOCTL_WAIT;
2303
2304         prot->dmaxfer.srcmem.va = NULL;
2305         prot->dmaxfer.dstmem.va = NULL;
2306         prot->dmaxfer.in_progress = FALSE;
2307
2308         prot->metadata_dbg = FALSE;
2309         prot->rx_metadata_offset = 0;
2310         prot->tx_metadata_offset = 0;
2311         prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
2312
2313         prot->ioctl_trans_id = 0;
2314
2315         /* Register the interrupt function upfront */
2316         /* remove corerev checks in data path */
2317         prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
2318
2319         /* Initialize Common MsgBuf Rings */
2320
2321         dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
2322         dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
2323         dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
2324         dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
2325         dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
2326
2327 #if defined(PCIE_D2H_SYNC)
2328         dhd_prot_d2h_sync_init(dhd);
2329 #endif /* PCIE_D2H_SYNC */
2330
2331         dhd_prot_h2d_sync_init(dhd);
2332
2333         /* init the scratch buffer */
2334         dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
2335         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2336                 D2H_DMA_SCRATCH_BUF, 0);
2337         dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
2338                 sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
2339
2340         /* If supported by the host, indicate the memory block
2341          * for completion writes / submission reads to shared space
2342          */
2343         if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
2344                 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
2345                 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2346                         D2H_DMA_INDX_WR_BUF, 0);
2347                 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
2348                 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2349                         H2D_DMA_INDX_RD_BUF, 0);
2350         }
2351
2352         if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
2353                 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
2354                 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2355                         H2D_DMA_INDX_WR_BUF, 0);
2356                 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
2357                 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2358                         D2H_DMA_INDX_RD_BUF, 0);
2359         }
2360
2361         /*
2362          * If the DMA-able buffers for flowring needs to come from a specific
2363          * contiguous memory region, then setup prot->flowrings_dma_buf here.
2364          * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
2365          * this contiguous memory region, for each of the flowrings.
2366          */
2367
2368         /* Pre-allocate pool of msgbuf_ring for flowrings */
2369         if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
2370                 return BCME_ERROR;
2371         }
2372
2373         /* Host should configure soft doorbells if needed ... here */
2374
2375         /* Post to dongle host configured soft doorbells */
2376         dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
2377
2378         /* Post buffers for packet reception and ioctl/event responses */
2379         dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
2380         dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
2381         dhd_msgbuf_rxbuf_post_event_bufs(dhd);
2382
2383         return BCME_OK;
2384 } /* dhd_prot_init */
2385
2386
2387 /**
2388  * dhd_prot_detach - PCIE FD protocol layer destructor.
2389  * Unlinks and frees all allocated protocol memory (including dhd_prot).
2390  */
2391 void
2392 dhd_prot_detach(dhd_pub_t *dhd)
2393 {
2394         dhd_prot_t *prot = dhd->prot;
2395
2396         /* Stop the protocol module */
2397         if (prot) {
2398
2399                 /* free up all DMA-able buffers allocated during prot attach/init */
2400
2401                 dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
2402                 dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
2403                 dhd_dma_buf_free(dhd, &prot->ioctbuf);
2404                 dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
2405
2406                 /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
2407                 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
2408                 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
2409                 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
2410                 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
2411
2412                 /* Common MsgBuf Rings */
2413                 dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
2414                 dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
2415                 dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
2416                 dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
2417                 dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
2418
2419                 /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
2420                 dhd_prot_flowrings_pool_detach(dhd);
2421
2422                 DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
2423
2424 #if defined(DHD_LB)
2425 #if defined(DHD_LB_TXC)
2426                 if (prot->tx_compl_prod.buffer) {
2427                         MFREE(dhd->osh, prot->tx_compl_prod.buffer,
2428                                 sizeof(void*) * DHD_LB_WORKQ_SZ);
2429                 }
2430 #endif /* DHD_LB_TXC */
2431 #if defined(DHD_LB_RXC)
2432                 if (prot->rx_compl_prod.buffer) {
2433                         MFREE(dhd->osh, prot->rx_compl_prod.buffer,
2434                                 sizeof(void*) * DHD_LB_WORKQ_SZ);
2435                 }
2436 #endif /* DHD_LB_RXC */
2437 #endif /* DHD_LB */
2438                 /* Free prot itself last; the workq buffers above are reached via prot */
2439 #ifndef CONFIG_DHD_USE_STATIC_BUF
2440                 MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
2441 #endif /* CONFIG_DHD_USE_STATIC_BUF */
2442
2443                 dhd->prot = NULL;
2444         }
2445 } /* dhd_prot_detach */
2446
2447
2448 /**
2449  * dhd_prot_reset - Reset the protocol layer without freeing any objects. This
2450  * may be invoked to soft reboot the dongle, without having to detach and attach
2451  * the entire protocol layer.
2452  *
2453  * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through
2454  * a dhd_prot_attach() phase.
2455  */
2456 void
2457 dhd_prot_reset(dhd_pub_t *dhd)
2458 {
2459         struct dhd_prot *prot = dhd->prot;
2460
2461         DHD_TRACE(("%s\n", __FUNCTION__));
2462
2463         if (prot == NULL) {
2464                 return;
2465         }
2466
2467         dhd_prot_flowrings_pool_reset(dhd);
2468
2469         dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
2470         dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
2471         dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
2472         dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
2473         dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
2474
2475         dhd_dma_buf_reset(dhd, &prot->retbuf);
2476         dhd_dma_buf_reset(dhd, &prot->ioctbuf);
2477         dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
2478         dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
2479         dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
2480         dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
2481         dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
2482
2483
2484         prot->rx_metadata_offset = 0;
2485         prot->tx_metadata_offset = 0;
2486
2487         prot->rxbufpost = 0;
2488         prot->cur_event_bufs_posted = 0;
2489         prot->cur_ioctlresp_bufs_posted = 0;
2490
2491         prot->active_tx_count = 0;
2492         prot->data_seq_no = 0;
2493         prot->ioctl_seq_no = 0;
2494         prot->ioctl_state = 0;
2495         prot->curr_ioctl_cmd = 0;
2496         prot->ioctl_received = IOCTL_WAIT;
2497         prot->ioctl_trans_id = 0;
2498
2499         /* dhd_flow_rings_init is invoked from dhd_bus_start,
2500          * so when stopping the bus, the flowrings shall be deleted
2501          */
2502         if (dhd->flow_rings_inited) {
2503                 dhd_flow_rings_deinit(dhd);
2504         }
2505
2506         if (prot->pktid_map_handle) {
2507                 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle);
2508                 prot->pktid_map_handle = NULL;
2509         }
2510
2511 #ifdef IOCTLRESP_USE_CONSTMEM
2512         if (prot->pktid_map_handle_ioctl) {
2513                 DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
2514                 prot->pktid_map_handle_ioctl = NULL;
2515         }
2516 #endif /* IOCTLRESP_USE_CONSTMEM */
2517 } /* dhd_prot_reset */
2518
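/*
 * Illustrative sketch (not part of the driver): the soft-reboot sequence the
 * comment above dhd_prot_reset() describes -- reset the protocol layer in
 * place, then re-run dhd_prot_init() without a fresh dhd_prot_attach(). The
 * bus-level steps in between are placeholders.
 */
#if 0 /* example only */
{
        dhd_prot_reset(dhd);            /* clear rings, counters and pktid maps */

        /* ... reset the dongle, re-download firmware, re-read pcie_shared_t ... */

        if (dhd_prot_init(dhd) != BCME_OK) {
                return BCME_ERROR;
        }
}
#endif /* example only */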
2519
2520 void
2521 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
2522 {
2523         dhd_prot_t *prot = dhd->prot;
2524         prot->rx_dataoffset = rx_offset;
2525 }
2526
2527 /**
2528  * Initialize protocol: sync w/dongle state.
2529  * Sets dongle media info (iswl, drv_version, mac address).
2530  */
2531 int
2532 dhd_sync_with_dongle(dhd_pub_t *dhd)
2533 {
2534         int ret = 0;
2535         wlc_rev_info_t revinfo;
2536
2537
2538         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2539
2540         dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
2541
2542
2543
2544 #ifdef DHD_FW_COREDUMP
2545         /* Check the memdump capability */
2546         dhd_get_memdump_info(dhd);
2547 #endif /* DHD_FW_COREDUMP */
2548 #ifdef BCMASSERT_LOG
2549         dhd_get_assert_info(dhd);
2550 #endif /* BCMASSERT_LOG */
2551
2552         /* Get the device rev info */
2553         memset(&revinfo, 0, sizeof(revinfo));
2554         ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
2555         if (ret < 0) {
2556                 DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
2557                 goto done;
2558         }
2559         DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
2560                 revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
2561
2562         dhd_process_cid_mac(dhd, TRUE);
2563
2564         ret = dhd_preinit_ioctls(dhd);
2565
2566         if (!ret) {
2567                 dhd_process_cid_mac(dhd, FALSE);
2568         }
2569
2570         /* Always assumes wl for now */
2571         dhd->iswl = TRUE;
2572 done:
2573         return ret;
2574 } /* dhd_sync_with_dongle */
2575
2576 #if defined(DHD_LB)
2577
2578 /* DHD load balancing: deferral of work to another online CPU */
2579
2580 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
2581 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
2582 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
2583 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
2584
2585 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
2586
2587 /**
2588  * dhd_lb_dispatch - load balance by dispatching work to other CPU cores.
2589  * Note: rx_compl_tasklet is dispatched explicitly.
2590  */
2591 static INLINE void
2592 dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
2593 {
2594         switch (ring_idx) {
2595
2596 #if defined(DHD_LB_TXC)
2597                 case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
2598                         bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
2599                         dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
2600                         break;
2601 #endif /* DHD_LB_TXC */
2602
2603                 case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
2604                 {
2605 #if defined(DHD_LB_RXC)
2606                         dhd_prot_t *prot = dhdp->prot;
2607                         /* Schedule the tasklet only if we have to */
2608                         if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
2609                                 /* flush WR index */
2610                                 bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
2611                                 dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
2612                         }
2613 #endif /* DHD_LB_RXC */
2614 #if defined(DHD_LB_RXP)
2615                         dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
2616 #endif /* DHD_LB_RXP */
2617                         break;
2618                 }
2619                 default:
2620                         break;
2621         }
2622 }
2623
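/*
 * Illustrative sketch (not part of the driver): the producer side that would
 * feed dhd_lb_tx_compl_handler() below. It assumes bcm_ring.h provides a
 * bcm_ring_prod()/BCM_RING_FULL counterpart to the bcm_ring_cons()/
 * BCM_RING_EMPTY pair used in the handler; dhd and pkt are placeholders for
 * the caller's context. Treat it as a sketch of the workq hand-off, not the
 * driver's actual tx-status path.
 */
#if 0 /* example only */
{
        bcm_workq_t *workq = &dhd->prot->tx_compl_prod;
        int elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);

        if (elem_ix != BCM_RING_FULL) {
                void **elem = WORKQ_ELEMENT(void *, workq, elem_ix);
                *elem = pkt;    /* hand the completed pkt to the consumer CPU */
                dhd_lb_dispatch(dhd, BCMPCIE_D2H_MSGRING_TX_COMPLETE);
        }
}
#endif /* example only */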
2624
2625 #if defined(DHD_LB_TXC)
2626 /**
2627  * DHD load balanced tx completion tasklet handler that performs the
2628  * freeing of packets on the selected CPU. Packet pointers are delivered to
2629  * this tasklet via the tx complete workq.
2630  */
2631 void
2632 dhd_lb_tx_compl_handler(unsigned long data)
2633 {
2634         int elem_ix;
2635         void *pkt, **elem;
2636         dmaaddr_t pa;
2637         uint32 pa_len;
2638         dhd_pub_t *dhd = (dhd_pub_t *)data;
2639         dhd_prot_t *prot = dhd->prot;
2640         bcm_workq_t *workq = &prot->tx_compl_cons;
2641         uint32 count = 0;
2642
2643         DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
2644
2645         while (1) {
2646                 elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
2647
2648                 if (elem_ix == BCM_RING_EMPTY) {
2649                         break;
2650                 }
2651
2652                 elem = WORKQ_ELEMENT(void *, workq, elem_ix);
2653                 pkt = *elem;
2654
2655                 DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
2656
2657                 OSL_PREFETCH(PKTTAG(pkt));
2658                 OSL_PREFETCH(pkt);
2659
2660                 pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
2661                 pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
2662
2663                 DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
2664
2665 #if defined(BCMPCIE)
2666                 dhd_txcomplete(dhd, pkt, true);
2667 #endif /* BCMPCIE */
2668
2669                 PKTFREE(dhd->osh, pkt, TRUE);
2670                 count++;
2671         }
2672
2673         /* smp_wmb(); */
2674         bcm_workq_cons_sync(workq);
2675         DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
2676 }
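/*
 * The handler above is the consumer half of a bcm_ring based workq pair:
 * dhd_prot_txstatus_process() (the producer, running in the bus completion
 * path) pushes packet pointers into prot->tx_compl_prod, and this tasklet
 * drains prot->tx_compl_cons on the selected CPU. A minimal consumer loop,
 * shown here only as an illustration of the pattern used above:
 *
 *     while ((elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ))
 *             != BCM_RING_EMPTY) {
 *             pkt = *WORKQ_ELEMENT(void *, workq, elem_ix);
 *             ... unmap and free pkt ...
 *     }
 *     bcm_workq_cons_sync(workq); * publish the updated read index *
 */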
2677 #endif /* DHD_LB_TXC */
2678
2679 #if defined(DHD_LB_RXC)
2680 void
2681 dhd_lb_rx_compl_handler(unsigned long data)
2682 {
2683         dhd_pub_t *dhd = (dhd_pub_t *)data;
2684         bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
2685
2686         DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
2687
2688         dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
2689         bcm_workq_cons_sync(workq);
2690 }
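/*
 * Note: passing TRUE for use_rsv_pktid above makes dhd_msgbuf_rxbuf_post()
 * pull pktids out of the rx_compl_cons workq (populated by the rx completion
 * path) instead of allocating fresh ones, so completed rx buffers are
 * recycled straight back to the dongle.
 */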
2691 #endif /* DHD_LB_RXC */
2692
2693 #endif /* DHD_LB */
2694
2695 #define DHD_DBG_SHOW_METADATA   0
2696
2697 #if DHD_DBG_SHOW_METADATA
2698 static void BCMFASTPATH
2699 dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
2700 {
2701         uint8 tlv_t;
2702         uint8 tlv_l;
2703         uint8 *tlv_v = (uint8 *)ptr;
2704
2705         if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
2706                 return;
2707
2708         len -= BCMPCIE_D2H_METADATA_HDRLEN;
2709         tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
2710
2711         while (len > TLV_HDR_LEN) {
2712                 tlv_t = tlv_v[TLV_TAG_OFF];
2713                 tlv_l = tlv_v[TLV_LEN_OFF];
2714
2715                 len -= TLV_HDR_LEN;
2716                 tlv_v += TLV_HDR_LEN;
2717                 if (len < tlv_l)
2718                         break;
2719                 if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
2720                         break;
2721
2722                 switch (tlv_t) {
2723                 case WLFC_CTL_TYPE_TXSTATUS: {
2724                         uint32 txs;
2725                         memcpy(&txs, tlv_v, sizeof(uint32));
2726                         if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
2727                                 printf("METADATA TX_STATUS: %08x\n", txs);
2728                         } else {
2729                                 wl_txstatus_additional_info_t tx_add_info;
2730                                 memcpy(&tx_add_info, tlv_v + sizeof(uint32),
2731                                         sizeof(wl_txstatus_additional_info_t));
2732                                 printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
2733                                         " rate = %08x tries = %d - %d\n", txs,
2734                                         tx_add_info.seq, tx_add_info.entry_ts,
2735                                         tx_add_info.enq_ts, tx_add_info.last_ts,
2736                                         tx_add_info.rspec, tx_add_info.rts_cnt,
2737                                         tx_add_info.tx_cnt);
2738                         }
2739                         } break;
2740
2741                 case WLFC_CTL_TYPE_RSSI: {
2742                         if (tlv_l == 1)
2743                                 printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
2744                         else
2745                                 printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
2746                                         (*(tlv_v + 3) << 8) | *(tlv_v + 2),
2747                                         (int8)(*tlv_v), *(tlv_v + 1));
2748                         } break;
2749
2750                 case WLFC_CTL_TYPE_FIFO_CREDITBACK:
2751                         bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
2752                         break;
2753
2754                 case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
2755                         bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
2756                         break;
2757
2758                 case WLFC_CTL_TYPE_RX_STAMP: {
2759                         struct {
2760                                 uint32 rspec;
2761                                 uint32 bus_time;
2762                                 uint32 wlan_time;
2763                         } rx_tmstamp;
2764                         memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
2765                         printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
2766                                 rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
2767                         } break;
2768
2769                 case WLFC_CTL_TYPE_TRANS_ID:
2770                         bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
2771                         break;
2772
2773                 case WLFC_CTL_TYPE_COMP_TXSTATUS:
2774                         bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
2775                         break;
2776
2777                 default:
2778                         bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
2779                         break;
2780                 }
2781
2782                 len -= tlv_l;
2783                 tlv_v += tlv_l;
2784         }
2785 }
2786 #endif /* DHD_DBG_SHOW_METADATA */
2787
2788 static INLINE void BCMFASTPATH
2789 dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
2790 {
2791         if (pkt) {
2792                 if (pkttype == PKTTYPE_IOCTL_RX ||
2793                         pkttype == PKTTYPE_EVENT_RX) {
2794 #ifdef DHD_USE_STATIC_CTRLBUF
2795                         PKTFREE_STATIC(dhd->osh, pkt, send);
2796 #else
2797                         PKTFREE(dhd->osh, pkt, send);
2798 #endif /* DHD_USE_STATIC_CTRLBUF */
2799                 } else {
2800                         PKTFREE(dhd->osh, pkt, send);
2801                 }
2802         }
2803 }
2804
2805 static INLINE void * BCMFASTPATH
2806 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
2807 {
2808         void *PKTBUF;
2809         dmaaddr_t pa;
2810         uint32 len;
2811         void *dmah;
2812         void *secdma;
2813
2814 #ifdef DHD_PCIE_PKTID
2815         if (free_pktid) {
2816                 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
2817                         pktid, pa, len, dmah, secdma, pkttype);
2818         } else {
2819                 PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle,
2820                         pktid, pa, len, dmah, secdma, pkttype);
2821         }
2822 #else
2823         PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa,
2824                 len, dmah, secdma, pkttype);
2825 #endif /* DHD_PCIE_PKTID */
2826
2827         if (PKTBUF) {
2828                 {
2829                         if (SECURE_DMA_ENAB(dhd->osh)) {
2830                                 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
2831                                         secdma, 0);
2832                         } else {
2833                                 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
2834                         }
2835                 }
2836         }
2837
2838         return PKTBUF;
2839 }
2840
2841 #ifdef IOCTLRESP_USE_CONSTMEM
2842 static INLINE void BCMFASTPATH
2843 dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
2844 {
2845         memset(retbuf, 0, sizeof(dhd_dma_buf_t));
2846         retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
2847                 retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
2848
2849         return;
2850 }
2851 #endif /* IOCTLRESP_USE_CONSTMEM */
2852
2853 static void BCMFASTPATH
2854 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
2855 {
2856         dhd_prot_t *prot = dhd->prot;
2857         int16 fillbufs;
2858         uint16 cnt = 256;
2859         int retcount = 0;
2860
2861         fillbufs = prot->max_rxbufpost - prot->rxbufpost;
2862         while (fillbufs >= RX_BUF_BURST) {
2863                 cnt--;
2864                 if (cnt == 0) {
2865                         /* find a better way to reschedule rx buf post if space not available */
2866                         DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
2867                         DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
2868                         break;
2869                 }
2870
2871                 /* Post in bursts of at most RX_BUF_BURST (32) buffers at a time */
2872                 fillbufs = MIN(fillbufs, RX_BUF_BURST);
2873
2874                 /* Post buffers */
2875                 retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
2876
2877                 if (retcount >= 0) {
2878                         prot->rxbufpost += (uint16)retcount;
2879 #ifdef DHD_LB_RXC
2880                         /* dhd_prot_rxbuf_post returns the number of buffers posted */
2881                         DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
2882 #endif /* DHD_LB_RXC */
2883                         /* how many more to post */
2884                         fillbufs = prot->max_rxbufpost - prot->rxbufpost;
2885                 } else {
2886                         /* Make sure we don't run the loop any further */
2887                         fillbufs = 0;
2888                 }
2889         }
2890 }
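/*
 * The loop above tops up the rx post ring in bursts. As a worked example
 * (numbers purely illustrative): if max_rxbufpost were 256 with nothing
 * currently posted, fillbufs would start at 256, so the loop posts up to
 * RX_BUF_BURST (32) buffers per iteration and recomputes the shortfall after
 * each burst, stopping once fewer than RX_BUF_BURST buffers are outstanding,
 * ring space runs out, or the 256-iteration safety counter expires.
 */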
2891
2892 /** Post 'count' rx buffers to the dongle */
2893 static int BCMFASTPATH
2894 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
2895 {
2896         void *p;
2897         uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
2898         uint8 *rxbuf_post_tmp;
2899         host_rxbuf_post_t *rxbuf_post;
2900         void *msg_start;
2901         dmaaddr_t pa;
2902         uint32 pktlen;
2903         uint8 i = 0;
2904         uint16 alloced = 0;
2905         unsigned long flags;
2906         uint32 pktid;
2907         dhd_prot_t *prot = dhd->prot;
2908         msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
2909
2910         DHD_GENERAL_LOCK(dhd, flags);
2911
2912         /* Claim space for exactly 'count' messages, for mitigation purposes */
2913         msg_start = (void *)
2914                 dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
2915
2916         DHD_GENERAL_UNLOCK(dhd, flags);
2917
2918         if (msg_start == NULL) {
2919                 DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
2920                 return -1;
2921         }
2922         /* if msg_start != NULL, we should have alloced space for at least 1 item */
2923         ASSERT(alloced > 0);
2924
2925         rxbuf_post_tmp = (uint8*)msg_start;
2926
2927         /* loop through each allocated message in the rxbuf post msgbuf_ring */
2928         for (i = 0; i < alloced; i++) {
2929                 rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
2930                 /* Create a rx buffer */
2931                 if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
2932                         DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
2933                         dhd->rx_pktgetfail++;
2934                         break;
2935                 }
2936
2937                 pktlen = PKTLEN(dhd->osh, p);
2938                 if (SECURE_DMA_ENAB(dhd->osh)) {
2939                         DHD_GENERAL_LOCK(dhd, flags);
2940                         pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
2941                                 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
2942                         DHD_GENERAL_UNLOCK(dhd, flags);
2943                 } else {
2944                         pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
2945                 }
2946
2947                 if (PHYSADDRISZERO(pa)) {
2948                         if (SECURE_DMA_ENAB(dhd->osh)) {
2949                                 DHD_GENERAL_LOCK(dhd, flags);
2950                                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
2951                                     ring->dma_buf.secdma, 0);
2952                                 DHD_GENERAL_UNLOCK(dhd, flags);
2953                         } else {
2954                                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
2955                         }
2956
2957                         PKTFREE(dhd->osh, p, FALSE);
2958                         DHD_ERROR(("Invalid physaddr 0\n"));
2959                         ASSERT(0);
2960                         break;
2961                 }
2962
2963                 PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
2964                 pktlen = PKTLEN(dhd->osh, p);
2965
2966                 /* Common msg header */
2967                 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
2968                 rxbuf_post->cmn_hdr.if_id = 0;
2969                 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
2970                 ring->seqnum++;
2971
2972 #if defined(DHD_LB_RXC)
2973                 if (use_rsv_pktid == TRUE) {
2974                         bcm_workq_t *workq = &prot->rx_compl_cons;
2975                         int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
2976                         if (elem_ix == BCM_RING_EMPTY) {
2977                                 DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
2978                                 pktid = DHD_PKTID_INVALID;
2979                                 goto alloc_pkt_id;
2980                         } else {
2981                                 uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
2982                                 pktid = *elem;
2983                         }
2984
2985                         /* Now populate the previous locker with valid information */
2986                         if (pktid != DHD_PKTID_INVALID) {
2987                                 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
2988                                 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid,
2989                                         pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
2990                                         PKTTYPE_DATA_RX);
2991                         }
2992                 } else
2993 #endif /* DHD_LB_RXC */
2994                 {
2995 #if defined(DHD_LB_RXC)
2996 alloc_pkt_id:
2997 #endif
2998 #if defined(DHD_PCIE_PKTID)
2999                 /* get the lock before calling DHD_NATIVE_TO_PKTID */
3000                 DHD_GENERAL_LOCK(dhd, flags);
3001 #endif
3002                 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa,
3003                         pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
3004
3005 #if defined(DHD_PCIE_PKTID)
3006                 /* free lock */
3007                 DHD_GENERAL_UNLOCK(dhd, flags);
3008
3009                 if (pktid == DHD_PKTID_INVALID) {
3010
3011                         if (SECURE_DMA_ENAB(dhd->osh)) {
3012                                 DHD_GENERAL_LOCK(dhd, flags);
3013                                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3014                                     ring->dma_buf.secdma, 0);
3015                                 DHD_GENERAL_UNLOCK(dhd, flags);
3016                         } else {
3017                                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3018                         }
3019
3020                         PKTFREE(dhd->osh, p, FALSE);
3021                         DHD_ERROR(("Pktid pool depleted.\n"));
3022                         break;
3023                 }
3024 #endif /* DHD_PCIE_PKTID */
3025                 }
3026
3027                 rxbuf_post->data_buf_len = htol16((uint16)pktlen);
3028                 rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3029                 rxbuf_post->data_buf_addr.low_addr =
3030                         htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
3031
3032                 if (prot->rx_metadata_offset) {
3033                         rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
3034                         rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3035                         rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
3036                 } else {
3037                         rxbuf_post->metadata_buf_len = 0;
3038                         rxbuf_post->metadata_buf_addr.high_addr = 0;
3039                         rxbuf_post->metadata_buf_addr.low_addr  = 0;
3040                 }
3041
3042 #if defined(DHD_PKTID_AUDIT_RING)
3043                 DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC);
3044 #endif /* DHD_PKTID_AUDIT_RING */
3045
3046                 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3047
3048                 /* Move rxbuf_post_tmp to next item */
3049                 rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
3050         }
3051
3052         if (i < alloced) {
3053                 if (ring->wr < (alloced - i)) {
3054                         ring->wr = ring->max_items - (alloced - i);
3055                 } else {
3056                         ring->wr -= (alloced - i);
3057                 }
3058
3059                 alloced = i;
3060         }
3061
3062         /* Update ring's WR index and ring the doorbell to the dongle */
3063         if (alloced > 0) {
3064                 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
3065         }
3066
3067         return alloced;
3068 } /* dhd_prot_rxbuf_post */
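/*
 * Note on the tail of dhd_prot_rxbuf_post() above: ring space is claimed for
 * 'alloced' items up front, so if packet allocation or pktid mapping fails
 * part way through (i < alloced), the write index is rolled back (modulo
 * max_items) to cover only the 'i' items actually populated before the
 * doorbell is rung.
 */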
3069
3070 #ifdef IOCTLRESP_USE_CONSTMEM
3071 static int
3072 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3073 {
3074         int err;
3075         memset(retbuf, 0, sizeof(dhd_dma_buf_t));
3076
3077         if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
3078                 DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
3079                 ASSERT(0);
3080                 return BCME_NOMEM;
3081         }
3082
3083         return BCME_OK;
3084 }
3085
3086 static void
3087 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3088 {
3089         /* retbuf (declared on stack) not fully populated ...  */
3090         if (retbuf->va) {
3091                 uint32 dma_pad;
3092                 dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
3093                 retbuf->len = IOCT_RETBUF_SIZE;
3094                 retbuf->_alloced = retbuf->len + dma_pad;
3095                 /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle.
3096                  * Need to reassign before free to pass the check in dhd_dma_buf_audit().
3097                  */
3098                 retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL);
3099         }
3100
3101         dhd_dma_buf_free(dhd, retbuf);
3102         return;
3103 }
3104 #endif /* IOCTLRESP_USE_CONSTMEM */
3105
3106 static int
3107 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
3108 {
3109         void *p;
3110         uint16 pktsz;
3111         ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
3112         dmaaddr_t pa;
3113         uint32 pktlen;
3114         dhd_prot_t *prot = dhd->prot;
3115         uint16 alloced = 0;
3116         unsigned long flags;
3117         dhd_dma_buf_t retbuf;
3118         void *dmah = NULL;
3119         uint32 pktid;
3120         void *map_handle;
3121         msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
3122
3123         if (dhd->busstate == DHD_BUS_DOWN) {
3124                 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
3125                 return -1;
3126         }
3127
3128         memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
3129
3130         if (event_buf) {
3131                 /* Allocate packet for event buffer post */
3132                 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3133         } else {
3134                 /* Allocate packet for ctrl/ioctl buffer post */
3135                 pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
3136         }
3137
3138 #ifdef IOCTLRESP_USE_CONSTMEM
3139         if (!event_buf) {
3140                 if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
3141                         DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
3142                         return -1;
3143                 }
3144                 ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
3145                 p = retbuf.va;
3146                 pktlen = retbuf.len;
3147                 pa = retbuf.pa;
3148                 dmah = retbuf.dmah;
3149         } else
3150 #endif /* IOCTLRESP_USE_CONSTMEM */
3151         {
3152 #ifdef DHD_USE_STATIC_CTRLBUF
3153                 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
3154 #else
3155                 p = PKTGET(dhd->osh, pktsz, FALSE);
3156 #endif /* DHD_USE_STATIC_CTRLBUF */
3157                 if (p == NULL) {
3158                         DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
3159                                 __FUNCTION__, __LINE__, event_buf ?
3160                                 "EVENT" : "IOCTL RESP"));
3161                         dhd->rx_pktgetfail++;
3162                         return -1;
3163                 }
3164
3165                 pktlen = PKTLEN(dhd->osh, p);
3166
3167                 if (SECURE_DMA_ENAB(dhd->osh)) {
3168                         DHD_GENERAL_LOCK(dhd, flags);
3169                         pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
3170                                 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3171                         DHD_GENERAL_UNLOCK(dhd, flags);
3172                 } else {
3173                         pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
3174                 }
3175
3176                 if (PHYSADDRISZERO(pa)) {
3177                         DHD_ERROR(("Invalid physaddr 0\n"));
3178                         ASSERT(0);
3179                         goto free_pkt_return;
3180                 }
3181         }
3182
3183         DHD_GENERAL_LOCK(dhd, flags);
3184
3185         rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
3186                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
3187
3188         if (rxbuf_post == NULL) {
3189                 DHD_GENERAL_UNLOCK(dhd, flags);
3190                 DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
3191                         __FUNCTION__, __LINE__));
3192
3193 #ifdef IOCTLRESP_USE_CONSTMEM
3194                 if (event_buf)
3195 #endif /* IOCTLRESP_USE_CONSTMEM */
3196                 {
3197                         if (SECURE_DMA_ENAB(dhd->osh)) {
3198                                 DHD_GENERAL_LOCK(dhd, flags);
3199                                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3200                                         ring->dma_buf.secdma, 0);
3201                                 DHD_GENERAL_UNLOCK(dhd, flags);
3202                         } else {
3203                                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3204                         }
3205                 }
3206                 goto free_pkt_return;
3207         }
3208
3209         /* CMN msg header */
3210         if (event_buf) {
3211                 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
3212         } else {
3213                 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
3214         }
3215
3216 #ifdef IOCTLRESP_USE_CONSTMEM
3217         if (!event_buf) {
3218                 map_handle = dhd->prot->pktid_map_handle_ioctl;
3219                 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen,
3220                         DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX);
3221         } else
3222 #endif /* IOCTLRESP_USE_CONSTMEM */
3223         {
3224                 map_handle = dhd->prot->pktid_map_handle;
3225                 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
3226                         p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
3227                         event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX);
3228         }
3229
3230         if (pktid == DHD_PKTID_INVALID) {
3231                 if (ring->wr == 0) {
3232                         ring->wr = ring->max_items - 1;
3233                 } else {
3234                         ring->wr--;
3235                 }
3236                 DHD_GENERAL_UNLOCK(dhd, flags);
3237                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3238                 goto free_pkt_return;
3239         }
3240
3241 #if defined(DHD_PKTID_AUDIT_RING)
3242         DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
3243 #endif /* DHD_PKTID_AUDIT_RING */
3244
3245         rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3246         rxbuf_post->cmn_hdr.if_id = 0;
3247         rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
3248         ring->seqnum++;
3249
3250 #if defined(DHD_PCIE_PKTID)
3251         if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
3252                 if (ring->wr == 0) {
3253                         ring->wr = ring->max_items - 1;
3254                 } else {
3255                         ring->wr--;
3256                 }
3257                 DHD_GENERAL_UNLOCK(dhd, flags);
3258 #ifdef IOCTLRESP_USE_CONSTMEM
3259                 if (event_buf)
3260 #endif /* IOCTLRESP_USE_CONSTMEM */
3261                 {
3262                         if (SECURE_DMA_ENAB(dhd->osh)) {
3263                                 DHD_GENERAL_LOCK(dhd, flags);
3264                                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3265                                         ring->dma_buf.secdma, 0);
3266                                 DHD_GENERAL_UNLOCK(dhd, flags);
3267                         } else {
3268                                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3269                         }
3270                 }
3271                 goto free_pkt_return;
3272         }
3273 #endif /* DHD_PCIE_PKTID */
3274
3275         rxbuf_post->cmn_hdr.flags = 0;
3276 #ifndef IOCTLRESP_USE_CONSTMEM
3277         rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
3278 #else
3279         rxbuf_post->host_buf_len = htol16((uint16)pktlen);
3280 #endif /* IOCTLRESP_USE_CONSTMEM */
3281         rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3282         rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
3283
3284         /* update ring's WR index and ring the doorbell to the dongle */
3285         dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
3286         DHD_GENERAL_UNLOCK(dhd, flags);
3287
3288         return 1;
3289
3290 free_pkt_return:
3291 #ifdef IOCTLRESP_USE_CONSTMEM
3292         if (!event_buf) {
3293                 free_ioctl_return_buffer(dhd, &retbuf);
3294         } else
3295 #endif /* IOCTLRESP_USE_CONSTMEM */
3296         {
3297                 dhd_prot_packet_free(dhd, p,
3298                         event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX,
3299                         FALSE);
3300         }
3301
3302         return -1;
3303 } /* dhd_prot_rxbufpost_ctrl */
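/*
 * dhd_prot_rxbufpost_ctrl() above posts exactly one host buffer per call on
 * the H2D control submission ring, either an event buffer or an ioctl
 * response buffer. With IOCTLRESP_USE_CONSTMEM the ioctl response buffer
 * comes from a dedicated DMA-able allocation tracked in
 * pktid_map_handle_ioctl; otherwise a regular packet is used and tracked in
 * the common pktid map. The helpers below simply loop over it to keep the
 * posted counts at their configured maxima.
 */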
3304
3305 static uint16
3306 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
3307 {
3308         uint32 i = 0;
3309         int32 ret_val;
3310
3311         DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
3312
3313         if (dhd->busstate == DHD_BUS_DOWN) {
3314                 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
3315                 return 0;
3316         }
3317
3318         while (i < max_to_post) {
3319                 ret_val  = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
3320                 if (ret_val < 0) {
3321                         break;
3322                 }
3323                 i++;
3324         }
3325         DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
3326         return (uint16)i;
3327 }
3328
3329 static void
3330 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
3331 {
3332         dhd_prot_t *prot = dhd->prot;
3333         int max_to_post;
3334
3335         DHD_INFO(("ioctl resp buf post\n"));
3336         max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
3337         if (max_to_post <= 0) {
3338                 DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
3339                         __FUNCTION__));
3340                 return;
3341         }
3342         prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
3343                 FALSE, max_to_post);
3344 }
3345
3346 static void
3347 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
3348 {
3349         dhd_prot_t *prot = dhd->prot;
3350         int max_to_post;
3351
3352         max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
3353         if (max_to_post <= 0) {
3354                 DHD_INFO(("%s: Cannot post more than max event buffers\n",
3355                         __FUNCTION__));
3356                 return;
3357         }
3358         prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
3359                 TRUE, max_to_post);
3360 }
3361
3362 /** called when DHD needs to check for 'receive complete' messages from the dongle */
3363 bool BCMFASTPATH
3364 dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
3365 {
3366         bool more = TRUE;
3367         uint n = 0;
3368         msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln;
3369
3370         /* Process all the messages - DTOH direction */
3371         while (!dhd_is_device_removed(dhd)) {
3372                 uint8 *msg_addr;
3373                 uint32 msg_len;
3374
3375                 if (dhd->hang_was_sent) {
3376                         more = FALSE;
3377                         break;
3378                 }
3379
3380                 /* Get the address of the next message to be read from ring */
3381                 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
3382                 if (msg_addr == NULL) {
3383                         more = FALSE;
3384                         break;
3385                 }
3386
3387                 /* Prefetch data to populate the cache */
3388                 OSL_PREFETCH(msg_addr);
3389
3390                 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
3391                         DHD_ERROR(("%s: process %s msg addr %p len %d\n",
3392                                 __FUNCTION__, ring->name, msg_addr, msg_len));
3393                 }
3394
3395                 /* Update read pointer */
3396                 dhd_prot_upd_read_idx(dhd, ring);
3397
3398                 /* After batch processing, check RX bound */
3399                 n += msg_len / ring->item_len;
3400                 if (n >= bound) {
3401                         break;
3402                 }
3403         }
3404
3405         return more;
3406 }
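/*
 * The 'bound' argument above caps how many rx completion work items are
 * consumed in one pass; returning TRUE ('more') tells the caller that the
 * ring may still hold unprocessed messages and that another pass should be
 * scheduled rather than spinning here indefinitely.
 */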
3407
3408 /**
3409  * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
3410  */
3411 void
3412 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
3413 {
3414         msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
3415
3416         /* Update read pointer */
3417         if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
3418                 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
3419         }
3420
3421         DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
3422                 ring->idx, flowid, ring->wr, ring->rd));
3423
3424         /* Need more logic here, but for now use it directly */
3425         dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
3426 }
3427
3428 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
3429 bool BCMFASTPATH
3430 dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
3431 {
3432         bool more = TRUE;
3433         uint n = 0;
3434         msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
3435
3436         /* Process all the messages - DTOH direction */
3437         while (!dhd_is_device_removed(dhd)) {
3438                 uint8 *msg_addr;
3439                 uint32 msg_len;
3440
3441                 if (dhd->hang_was_sent) {
3442                         more = FALSE;
3443                         break;
3444                 }
3445
3446                 /* Get the address of the next message to be read from ring */
3447                 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
3448                 if (msg_addr == NULL) {
3449                         more = FALSE;
3450                         break;
3451                 }
3452
3453                 /* Prefetch data to populate the cache */
3454                 OSL_PREFETCH(msg_addr);
3455
3456                 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
3457                         DHD_ERROR(("%s: process %s msg addr %p len %d\n",
3458                                 __FUNCTION__, ring->name, msg_addr, msg_len));
3459                 }
3460
3461                 /* Write to dngl rd ptr */
3462                 dhd_prot_upd_read_idx(dhd, ring);
3463
3464                 /* After batch processing, check bound */
3465                 n += msg_len / ring->item_len;
3466                 if (n >= bound) {
3467                         break;
3468                 }
3469         }
3470
3471         return more;
3472 }
3473
3474 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
3475 int BCMFASTPATH
3476 dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
3477 {
3478         dhd_prot_t *prot = dhd->prot;
3479         msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
3480
3481         /* Process all the messages - DTOH direction */
3482         while (!dhd_is_device_removed(dhd)) {
3483                 uint8 *msg_addr;
3484                 uint32 msg_len;
3485
3486                 if (dhd->hang_was_sent) {
3487                         break;
3488                 }
3489
3490                 /* Get the address of the next message to be read from ring */
3491                 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
3492                 if (msg_addr == NULL) {
3493                         break;
3494                 }
3495
3496                 /* Prefetch data to populate the cache */
3497                 OSL_PREFETCH(msg_addr);
3498
3499                 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
3500                         DHD_ERROR(("%s: process %s msg addr %p len %d\n",
3501                                 __FUNCTION__, ring->name, msg_addr, msg_len));
3502                 }
3503
3504                 /* Write to dngl rd ptr */
3505                 dhd_prot_upd_read_idx(dhd, ring);
3506         }
3507
3508         return 0;
3509 }
3510
3511 /**
3512  * Consume messages out of the D2H ring. Ensure that the message's DMA to host
3513  * memory has completed, before invoking the message handler via a table lookup
3514  * of the cmn_msg_hdr::msg_type.
3515  */
3516 static int BCMFASTPATH
3517 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
3518 {
3519         int buf_len = len;
3520         uint16 item_len;
3521         uint8 msg_type;
3522         cmn_msg_hdr_t *msg = NULL;
3523         int ret = BCME_OK;
3524
3525         ASSERT(ring);
3526         item_len = ring->item_len;
3527         if (item_len == 0) {
3528                 DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n",
3529                         __FUNCTION__, ring->idx, item_len, buf_len));
3530                 return BCME_ERROR;
3531         }
3532
3533         while (buf_len > 0) {
3534                 if (dhd->hang_was_sent) {
3535                         ret = BCME_ERROR;
3536                         goto done;
3537                 }
3538
3539                 msg = (cmn_msg_hdr_t *)buf;
3540
3541                 /*
3542                  * Update curr_rd to the index in the ring from which this work
3543                  * item is fetched. This way, if the fetched work item fails the
3544                  * LIVELOCK check, we can print the exact read index in the ring
3545                  * that holds the corrupted work item.
3546                  */
3547                 if ((ring->curr_rd + 1) >= ring->max_items) {
3548                         ring->curr_rd = 0;
3549                 } else {
3550                         ring->curr_rd += 1;
3551                 }
3552
3553 #if defined(PCIE_D2H_SYNC)
3554                 /* Wait until DMA completes, then fetch msg_type */
3555                 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
3556 #else
3557                 msg_type = msg->msg_type;
3558 #endif /* !PCIE_D2H_SYNC */
3559
3560                 /* Prefetch data to populate the cache */
3561                 OSL_PREFETCH(buf + item_len);
3562
3563                 DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
3564                         msg_type, item_len, buf_len));
3565
3566                 if (msg_type == MSG_TYPE_LOOPBACK) {
3567                         bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
3568                         DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
3569                 }
3570
3571                 ASSERT(msg_type < DHD_PROT_FUNCS);
3572                 if (msg_type >= DHD_PROT_FUNCS) {
3573                         DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n",
3574                                 __FUNCTION__, msg_type, item_len, buf_len));
3575                         ret = BCME_ERROR;
3576                         goto done;
3577                 }
3578
3579                 if (table_lookup[msg_type]) {
3580                         table_lookup[msg_type](dhd, buf);
3581                 }
3582
3583                 if (buf_len < item_len) {
3584                         ret = BCME_ERROR;
3585                         goto done;
3586                 }
3587                 buf_len = buf_len - item_len;
3588                 buf = buf + item_len;
3589         }
3590
3591 done:
3592
3593 #ifdef DHD_RX_CHAINING
3594         dhd_rxchain_commit(dhd);
3595 #endif
3596 #if defined(DHD_LB)
3597         dhd_lb_dispatch(dhd, ring->idx);
3598 #endif
3599         return ret;
3600 } /* dhd_prot_process_msgtype */
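/*
 * dhd_prot_process_msgtype() above walks the fetched region one item at a
 * time: each work item starts with a cmn_msg_hdr_t, the (optionally
 * DMA-synchronized) msg_type selects a handler from table_lookup[], and
 * curr_rd is advanced per item so a corrupted entry can be pinpointed. The
 * per-message handlers that follow are the entries of that table.
 */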
3601
3602 static void
3603 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
3604 {
3605         return;
3606 }
3607
3608 /** called on MSG_TYPE_RING_STATUS message received from dongle */
3609 static void
3610 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
3611 {
3612         pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg;
3613         DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
3614                 ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
3615                 ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
3616         /* How do we track this to pair it with ??? */
3617         return;
3618 }
3619
3620 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
3621 static void
3622 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
3623 {
3624         pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
3625         DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
3626                 gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
3627                 gen_status->compl_hdr.flow_ring_id));
3628
3629         /* How do we track this to pair it with ??? */
3630         return;
3631 }
3632
3633 /**
3634  * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
3635  * dongle received the ioctl message in dongle memory.
3636  */
3637 static void
3638 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
3639 {
3640         uint32 pktid;
3641         ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
3642         unsigned long flags;
3643
3644         pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
3645
3646 #if defined(DHD_PKTID_AUDIT_RING)
3647         /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */
3648         if (pktid != DHD_IOCTL_REQ_PKTID) {
3649                 if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3650                         DHD_TEST_IS_ALLOC) == BCME_ERROR) {
3651                         prhex("dhd_prot_ioctack_process:",
3652                                 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3653                 }
3654         }
3655 #endif /* DHD_PKTID_AUDIT_RING */
3656
3657         DHD_GENERAL_LOCK(dhd, flags);
3658         if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
3659                 (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
3660                 dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
3661         } else {
3662                 DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
3663                         __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
3664                 prhex("dhd_prot_ioctack_process:",
3665                         (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3666         }
3667         DHD_GENERAL_UNLOCK(dhd, flags);
3668
3669         DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
3670                 ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
3671                 ioct_ack->compl_hdr.flow_ring_id));
3672         if (ioct_ack->compl_hdr.status != 0)  {
3673                 DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
3674         }
3675 }
3676
3677 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
3678 static void
3679 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
3680 {
3681         dhd_prot_t *prot = dhd->prot;
3682         uint32 pkt_id, xt_id;
3683         ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
3684         void *pkt;
3685         unsigned long flags;
3686         dhd_dma_buf_t retbuf;
3687
3688         memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
3689
3690         pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
3691
3692 #if defined(DHD_PKTID_AUDIT_RING)
3693         {
3694                 int ret;
3695 #ifndef IOCTLRESP_USE_CONSTMEM
3696                 ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id,
3697                         DHD_DUPLICATE_FREE);
3698 #else
3699                 ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id,
3700                         DHD_DUPLICATE_FREE);
3701 #endif /* !IOCTLRESP_USE_CONSTMEM */
3702                 if (ret == BCME_ERROR) {
3703                         prhex("dhd_prot_ioctcmplt_process:",
3704                                 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3705                 }
3706         }
3707 #endif /* DHD_PKTID_AUDIT_RING */
3708
3709         DHD_GENERAL_LOCK(dhd, flags);
3710         if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
3711                 !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
3712                 DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
3713                         __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
3714                 prhex("dhd_prot_ioctcmplt_process:",
3715                                 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3716                 DHD_GENERAL_UNLOCK(dhd, flags);
3717                 return;
3718         }
3719 #ifndef IOCTLRESP_USE_CONSTMEM
3720         pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
3721 #else
3722         dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
3723         pkt = retbuf.va;
3724 #endif /* !IOCTLRESP_USE_CONSTMEM */
3725         if (!pkt) {
3726                 prot->ioctl_state = 0;
3727                 DHD_GENERAL_UNLOCK(dhd, flags);
3728                 DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
3729                 return;
3730         }
3731         DHD_GENERAL_UNLOCK(dhd, flags);
3732
3733         prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
3734         prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
3735         xt_id = ltoh16(ioct_resp->trans_id);
3736         if (xt_id != prot->ioctl_trans_id) {
3737                 ASSERT(0);
3738                 goto exit;
3739         }
3740
3741         DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
3742                 pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
3743
3744         if (prot->ioctl_resplen > 0) {
3745 #ifndef IOCTLRESP_USE_CONSTMEM
3746                 bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
3747 #else
3748                 bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
3749 #endif /* !IOCTLRESP_USE_CONSTMEM */
3750         }
3751
3752         /* wake up any dhd_os_ioctl_resp_wait() */
3753         dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
3754
3755 exit:
3756 #ifndef IOCTLRESP_USE_CONSTMEM
3757         dhd_prot_packet_free(dhd, pkt,
3758                 PKTTYPE_IOCTL_RX, FALSE);
3759 #else
3760         free_ioctl_return_buffer(dhd, &retbuf);
3761 #endif /* !IOCTLRESP_USE_CONSTMEM */
3762 }
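/*
 * Ioctl handshake, as implemented by the two handlers above: the dongle
 * first acks the request (MSG_TYPE_IOCTLPTR_REQ_ACK clears
 * MSGBUF_IOCTL_ACK_PENDING) and then delivers the response
 * (MSG_TYPE_IOCTL_CMPLT), whose trans_id must match prot->ioctl_trans_id
 * before the payload is copied into prot->retbuf and the waiter in
 * dhd_os_ioctl_resp_wait() is woken.
 */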
3763
3764 /** called on MSG_TYPE_TX_STATUS message received from dongle */
3765 static void BCMFASTPATH
3766 dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
3767 {
3768         dhd_prot_t *prot = dhd->prot;
3769         host_txbuf_cmpl_t * txstatus;
3770         unsigned long flags;
3771         uint32 pktid;
3772         void *pkt = NULL;
3773         dmaaddr_t pa;
3774         uint32 len;
3775         void *dmah;
3776         void *secdma;
3777
3778         /* locks required to protect circular buffer accesses */
3779         DHD_GENERAL_LOCK(dhd, flags);
3780
3781         txstatus = (host_txbuf_cmpl_t *)msg;
3782         pktid = ltoh32(txstatus->cmn_hdr.request_id);
3783
3784 #if defined(DHD_PKTID_AUDIT_RING)
3785         if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3786                 DHD_DUPLICATE_FREE) == BCME_ERROR) {
3787                         prhex("dhd_prot_txstatus_process:",
3788                                 (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
3789         }
3790 #endif /* DHD_PKTID_AUDIT_RING */
3791
3792         DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
3793         if (prot->active_tx_count) {
3794                 prot->active_tx_count--;
3795
3796                 /* Release the wake lock when no more tx packets are pending */
3797                 if (prot->active_tx_count == 0)
3798                          DHD_OS_WAKE_UNLOCK(dhd);
3799
3800         } else {
3801                 DHD_ERROR(("Extra packets are freed\n"));
3802         }
3803
3804         ASSERT(pktid != 0);
3805
3806 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
3807         {
3808                 int elem_ix;
3809                 void **elem;
3810                 bcm_workq_t *workq;
3811
3812                 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
3813                         pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
3814
3815                 workq = &prot->tx_compl_prod;
3816                 /*
3817                  * Produce the packet into the tx_compl workq for the tx compl tasklet
3818                  * to consume.
3819                  */
3820                 OSL_PREFETCH(PKTTAG(pkt));
3821
3822                 /* fetch next available slot in workq */
3823                 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3824
3825                 DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
3826                 DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
3827
3828                 if (elem_ix == BCM_RING_FULL) {
3829                         DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
3830                         goto workq_ring_full;
3831                 }
3832
3833                 elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
3834                 *elem = pkt;
3835
3836                 smp_wmb();
3837
3838                 /* Sync WR index to consumer if the SYNC threshold has been reached */
3839                 if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
3840                         bcm_workq_prod_sync(workq);
3841                         prot->tx_compl_prod_sync = 0;
3842                 }
3843
3844                 DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
3845                 __FUNCTION__, pkt, prot->tx_compl_prod_sync));
3846
3847                 DHD_GENERAL_UNLOCK(dhd, flags);
3848                 return;
3849            }
3850
3851 workq_ring_full:
3852
3853 #endif /* DHD_LB_TXC && !BCM_SECURE_DMA */
3854
3855         /*
3856          * We can get here either when DHD_LB_TXC is not enabled, or when DHD_LB_TXC is
3857          * defined but the tx_compl workq is full.
3858          */
3859         if (pkt == NULL) {
3860                 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
3861                         pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
3862         }
3863
3864         if (pkt) {
3865                 if (SECURE_DMA_ENAB(dhd->osh)) {
3866                         int offset = 0;
3867                         BCM_REFERENCE(offset);
3868
3869                         if (dhd->prot->tx_metadata_offset)
3870                                 offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
3871                         SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
3872                                 (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
3873                                 secdma, offset);
3874                 } else {
3875                         DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
3876                 }
3877 #if defined(BCMPCIE)
3878                 dhd_txcomplete(dhd, pkt, true);
3879 #endif /* BCMPCIE */
3880
3881 #if DHD_DBG_SHOW_METADATA
3882                 if (dhd->prot->metadata_dbg &&
3883                     dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
3884                         uchar *ptr;
3885                         /* The Ethernet header of the TX frame was copied and removed.
3886                          * Here, move the data pointer forward by the Ethernet header size.
3887                          */
3888                         PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
3889                         ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
3890                         bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
3891                         dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
3892                 }
3893 #endif /* DHD_DBG_SHOW_METADATA */
3894                 PKTFREE(dhd->osh, pkt, TRUE);
3895                 DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
3896                 txstatus->tx_status);
3897         }
3898
3899         DHD_GENERAL_UNLOCK(dhd, flags);
3900
3901         return;
3902 } /* dhd_prot_txstatus_process */
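/*
 * Note on the tx status path above: with DHD_LB_TXC the common case only
 * converts the pktid back to the native packet, stashes the DMA address and
 * length in the packet tag, and queues the pointer into tx_compl_prod for
 * the load balanced tasklet to unmap and free; the inline unmap/free below
 * the workq_ring_full label is the fallback used when load balancing is not
 * compiled in or the workq is full.
 */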
3903
3904 /** called on MSG_TYPE_WL_EVENT message received from dongle */
3905 static void
3906 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
3907 {
3908         wlevent_req_msg_t *evnt;
3909         uint32 bufid;
3910         uint16 buflen;
3911         int ifidx = 0;
3912         void* pkt;
3913         unsigned long flags;
3914         dhd_prot_t *prot = dhd->prot;
3915
3916         /* Event complete header */
3917         evnt = (wlevent_req_msg_t *)msg;
3918         bufid = ltoh32(evnt->cmn_hdr.request_id);
3919
3920 #if defined(DHD_PKTID_AUDIT_RING)
3921         if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid,
3922                 DHD_DUPLICATE_FREE) == BCME_ERROR) {
3923                         prhex("dhd_prot_event_process:",
3924                                 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3925         }
3926 #endif /* DHD_PKTID_AUDIT_RING */
3927
3928         buflen = ltoh16(evnt->event_data_len);
3929
3930         ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
3931
3932         /* Post another rxbuf to the device */
3933         if (prot->cur_event_bufs_posted) {
3934                 prot->cur_event_bufs_posted--;
3935         }
3936         dhd_msgbuf_rxbuf_post_event_bufs(dhd);
3937
3938         /* locks required to protect pktid_map */
3939         DHD_GENERAL_LOCK(dhd, flags);
3940         pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
3941         DHD_GENERAL_UNLOCK(dhd, flags);
3942
3943         if (!pkt) {
3944                 return;
3945         }
3946
3947         /* DMA RX offset updated through shared area */
3948         if (dhd->prot->rx_dataoffset) {
3949                 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
3950         }
3951
3952         PKTSETLEN(dhd->osh, pkt, buflen);
3953
3954         dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
3955 }
3956
3957 /** called on MSG_TYPE_RX_CMPLT message received from dongle */
3958 static void BCMFASTPATH
3959 dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg)
3960 {
3961         host_rxbuf_cmpl_t *rxcmplt_h;
3962         uint16 data_offset;             /* offset at which data starts */
3963         void *pkt;
3964         unsigned long flags;
3965         uint ifidx;
3966         uint32 pktid;
3967 #if defined(DHD_LB_RXC)
3968         const bool free_pktid = FALSE;
3969 #else
3970         const bool free_pktid = TRUE;
3971 #endif /* DHD_LB_RXC */
3972
3973         /* RXCMPLT HDR */
3974         rxcmplt_h = (host_rxbuf_cmpl_t *)msg;
3975
3976         /* offset from which data starts is populated in rxstatus0 */
3977         data_offset = ltoh16(rxcmplt_h->data_offset);
3978
3979         pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id);
3980
3981 #if defined(DHD_PKTID_AUDIT_RING)
3982         if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3983                 DHD_DUPLICATE_FREE) == BCME_ERROR) {
3984                         prhex("dhd_prot_rxcmplt_process:",
3985                                 (uchar *)msg, D2HRING_RXCMPLT_ITEMSIZE);
3986                 }
3987 #endif /* DHD_PKTID_AUDIT_RING */
3988
3989         DHD_GENERAL_LOCK(dhd, flags);
3990         pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid);
3991         DHD_GENERAL_UNLOCK(dhd, flags);
3992
3993         if (!pkt) {
3994                 return;
3995         }
3996
3997         /* Post another set of rxbufs to the device */
3998         dhd_prot_return_rxbuf(dhd, pktid, 1);
3999
4000         DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
4001                 ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
4002                 rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
4003                 ltoh16(rxcmplt_h->metadata_len)));
4004 #if DHD_DBG_SHOW_METADATA
4005         if (dhd->prot->metadata_dbg &&
4006             dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
4007                 uchar *ptr;
4008                 ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
4009                 /* header followed by data */
4010                 bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
4011                 dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
4012         }
4013 #endif /* DHD_DBG_SHOW_METADATA */
4014
4015         if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
4016                 DHD_INFO(("D11 frame rxed \n"));
4017         }
4018
4019         /* data_offset from buf start */
4020         if (data_offset) {
4021                 /* data offset given from dongle after split rx */
4022                 PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
4023         } else {
4024                 /* DMA RX offset updated through shared area */
4025                 if (dhd->prot->rx_dataoffset) {
4026                         PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
4027                 }
4028         }
4029         /* Actual length of the packet */
4030         PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
4031
4032         ifidx = rxcmplt_h->cmn_hdr.if_id;
4033
4034 #if defined(DHD_LB_RXP)
4035         dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
4036 #else  /* ! DHD_LB_RXP */
4037 #ifdef DHD_RX_CHAINING
4038         /* Chain the packets */
4039         dhd_rxchain_frame(dhd, pkt, ifidx);
4040 #else /* ! DHD_RX_CHAINING */
4041         /* offset from which data starts is populated in rxstatus0 */
4042         dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
4043 #endif /* ! DHD_RX_CHAINING */
4044 #endif /* ! DHD_LB_RXP */
4045 } /* dhd_prot_rxcmplt_process */
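/*
 * Rx completion above: data_offset (or, failing that, the shared
 * rx_dataoffset) is stripped with PKTPULL before the packet length is set
 * from the completion record, and the packet is then handed off either to
 * the DHD_LB_RXP queue, the rx chaining logic, or directly to
 * dhd_bus_rx_frame(), depending on the build options.
 */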
4046
4047 /** Stop protocol: sync w/dongle state. */
4048 void dhd_prot_stop(dhd_pub_t *dhd)
4049 {
4050         ASSERT(dhd);
4051         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4052
4053 }
4054
4055 /* Add any protocol-specific data header.
4056  * Caller must reserve prot_hdrlen prepend space.
4057  */
4058 void BCMFASTPATH
4059 dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
4060 {
4061         return;
4062 }
4063
4064 uint
4065 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
4066 {
4067         return 0;
4068 }
4069
4070
4071 #define PKTBUF pktbuf
4072
4073 /**
4074  * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
4075  * the corresponding flow ring.
4076  */
4077 int BCMFASTPATH
4078 dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
4079 {
4080         unsigned long flags;
4081         dhd_prot_t *prot = dhd->prot;
4082         host_txbuf_post_t *txdesc = NULL;
4083         dmaaddr_t pa, meta_pa;
4084         uint8 *pktdata;
4085         uint32 pktlen;
4086         uint32 pktid;
4087         uint8   prio;
4088         uint16 flowid = 0;
4089         uint16 alloced = 0;
4090         uint16  headroom;
4091         msgbuf_ring_t *ring;
4092         flow_ring_table_t *flow_ring_table;
4093         flow_ring_node_t *flow_ring_node;
4094
4095         if (dhd->flow_ring_table == NULL) {
4096                 return BCME_NORESOURCE;
4097         }
4098
4099         flowid = DHD_PKT_GET_FLOWID(PKTBUF);
4100
4101         flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
4102         flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
4103
4104         ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
4105
4106
4107         DHD_GENERAL_LOCK(dhd, flags);
4108
4109         /* Create a unique 32-bit packet id */
4110         pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF);
4111 #if defined(DHD_PCIE_PKTID)
4112         if (pktid == DHD_PKTID_INVALID) {
4113                 DHD_ERROR(("Pktid pool depleted.\n"));
4114                 /*
4115                  * Returning an error here makes the caller requeue the packet.
4116                  * Since the original skb has not been freed, the caller will
4117                  * simply resubmit the same packet once packet ids become
4118                  * available again.
4119                  */
4120                 goto err_no_res_pktfree;
4121         }
4122 #endif /* DHD_PCIE_PKTID */
4123
4124         /* Reserve space in the circular buffer */
4125         txdesc = (host_txbuf_post_t *)
4126                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4127         if (txdesc == NULL) {
4128 #if defined(DHD_PCIE_PKTID)
4129                 void *dmah;
4130                 void *secdma;
4131                 /* Free up the PKTID. physaddr and pktlen will be garbage. */
4132                 DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid,
4133                         pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
4134 #endif /* DHD_PCIE_PKTID */
4135                 DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
4136                         __FUNCTION__, __LINE__, prot->active_tx_count));
4137                 goto err_no_res_pktfree;
4138         }
4139
4140         /* Extract the data pointer and length information */
4141         pktdata = PKTDATA(dhd->osh, PKTBUF);
4142         pktlen  = PKTLEN(dhd->osh, PKTBUF);
4143
4144         /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
4145         bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
4146
4147         /* Extract the ethernet header and adjust the data pointer and length */
4148         pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
4149         pktlen -= ETHER_HDR_LEN;
4150
4151         /* Map the data pointer to a DMA-able address */
4152         if (SECURE_DMA_ENAB(dhd->osh)) {
4153                 int offset = 0;
4154                 BCM_REFERENCE(offset);
4155
4156                 if (prot->tx_metadata_offset) {
4157                         offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
4158                 }
4159
4160                 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
4161                         DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
4162         } else {
4163                 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
4164         }
4165
4166         if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) {
4167                 DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
4168                 ASSERT(0);
4169         }
4170
4171         /* No need to lock. Save the rest of the packet's metadata */
4172         DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid,
4173             pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
4174
4175 #ifdef TXP_FLUSH_NITEMS
4176         if (ring->pend_items_count == 0) {
4177                 ring->start_addr = (void *)txdesc;
4178         }
4179         ring->pend_items_count++;
4180 #endif
4181
4182         /* Form the Tx descriptor message buffer */
4183
4184         /* Common message hdr */
4185         txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
4186         txdesc->cmn_hdr.if_id = ifidx;
4187
4188         txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
4189         prio = (uint8)PKTPRIO(PKTBUF);
4190
4191
4192         txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
4193         txdesc->seg_cnt = 1;
4194
4195         txdesc->data_len = htol16((uint16) pktlen);
4196         txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4197         txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
4198
4199         /* Move data pointer to keep ether header in local PKTBUF for later reference */
4200         PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
4201
4202         /* Handle Tx metadata */
4203         headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
4204         if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
4205                 DHD_ERROR(("No headroom for Metadata tx %d %d\n",
4206                 prot->tx_metadata_offset, headroom));
4207         }
4208
4209         if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
4210                 DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
4211
4212                 /* Adjust the data pointer to account for meta data in DMA_MAP */
4213                 PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
4214
4215                 if (SECURE_DMA_ENAB(dhd->osh)) {
4216                         meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
4217                                 prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
4218                                 0, ring->dma_buf.secdma);
4219                 } else {
4220                         meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
4221                                 prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
4222                 }
4223
4224                 if (PHYSADDRISZERO(meta_pa)) {
4225                         DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
4226                         ASSERT(0);
4227                 }
4228
4229                 /* Adjust the data pointer back to original value */
4230                 PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
4231
4232                 txdesc->metadata_buf_len = prot->tx_metadata_offset;
4233                 txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
4234                 txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
4235         } else {
4236                 txdesc->metadata_buf_len = htol16(0);
4237                 txdesc->metadata_buf_addr.high_addr = 0;
4238                 txdesc->metadata_buf_addr.low_addr = 0;
4239         }
4240
4241 #if defined(DHD_PKTID_AUDIT_RING)
4242         DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid,
4243                 DHD_DUPLICATE_ALLOC);
4244 #endif /* DHD_PKTID_AUDIT_RING */
4245
4246         txdesc->cmn_hdr.request_id = htol32(pktid);
4247
4248         DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
4249                 txdesc->cmn_hdr.request_id));
4250
4251         /* Update the write pointer in TCM & ring bell */
4252 #ifdef TXP_FLUSH_NITEMS
4253         /* Flush if we have either hit the txp_threshold or if this msg is */
4254         /* occupying the last slot in the flow_ring - before wrap around.  */
4255         if ((ring->pend_items_count == prot->txp_threshold) ||
4256                 ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
4257                 dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
4258         }
4259 #else
4260         /* update ring's WR index and ring doorbell to dongle */
4261         dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
4262 #endif
4263
4264         prot->active_tx_count++;
4265
4266         /*
4267          * Take a wake lock; do not sleep while we have at least one
4268          * packet in flight.
4269          */
4270         if (prot->active_tx_count == 1)
4271                 DHD_OS_WAKE_LOCK(dhd);
4272
4273         DHD_GENERAL_UNLOCK(dhd, flags);
4274
4275         return BCME_OK;
4276
4277 err_no_res_pktfree:
4278
4279
4280
4281         DHD_GENERAL_UNLOCK(dhd, flags);
4282         return BCME_NORESOURCE;
4283 } /* dhd_prot_txdata */
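/*
 * Illustrative sketch only (not compiled): one plausible caller-side loop for
 * dhd_prot_txdata(), matching the "dequeued from a flow queue" description
 * above. The dequeue/reinsert helpers and the retry policy shown here are
 * assumptions for illustration, not the actual bus-layer scheduler.
 */
#if 0
static void example_flowring_tx(dhd_pub_t *dhd, flow_ring_node_t *node, uint8 ifidx)
{
        void *pkt;

        /* Drain the backup queue; dhd_prot_txdata() posts each packet to the flow ring */
        while ((pkt = dhd_flow_queue_dequeue(dhd, &node->queue)) != NULL) {
                if (dhd_prot_txdata(dhd, pkt, ifidx) != BCME_OK) {
                        /* No pktid or ring space: put the packet back and retry later */
                        dhd_flow_queue_reinsert(dhd, &node->queue, pkt);
                        break;
                }
        }

        /* Push out any items left pending by TXP_FLUSH_NITEMS batching */
        dhd_prot_txdata_write_flush(dhd, node->flowid, FALSE);
}
#endif /* 0 */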
4284
4285 /* May be called with the general lock already held; see 'in_lock'. */
4286 /** Optimization: write "n" pending tx items to the ring with a single doorbell. */
4287 void BCMFASTPATH
4288 dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
4289 {
4290 #ifdef TXP_FLUSH_NITEMS
4291         unsigned long flags = 0;
4292         flow_ring_table_t *flow_ring_table;
4293         flow_ring_node_t *flow_ring_node;
4294         msgbuf_ring_t *ring;
4295
4296         if (dhd->flow_ring_table == NULL) {
4297                 return;
4298         }
4299
4300         if (!in_lock) {
4301                 DHD_GENERAL_LOCK(dhd, flags);
4302         }
4303
4304         flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
4305         flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
4306         ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
4307
4308         if (ring->pend_items_count) {
4309                 /* update ring's WR index and ring doorbell to dongle */
4310                 dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
4311                         ring->pend_items_count);
4312                 ring->pend_items_count = 0;
4313                 ring->start_addr = NULL;
4314         }
4315
4316         if (!in_lock) {
4317                 DHD_GENERAL_UNLOCK(dhd, flags);
4318         }
4319 #endif /* TXP_FLUSH_NITEMS */
4320 }
4321
4322 #undef PKTBUF   /* Only defined in the above routine */
4323
4324 int BCMFASTPATH
4325 dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
4326 {
4327         return 0;
4328 }
4329
4330 /** Account for consumed rx buffers and, if the posted count drops low, post fresh rx buffers to the dongle */
4331 static void BCMFASTPATH
4332 dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
4333 {
4334         dhd_prot_t *prot = dhd->prot;
4335 #if defined(DHD_LB_RXC)
4336         int elem_ix;
4337         uint32 *elem;
4338         bcm_workq_t *workq;
4339
4340         workq = &prot->rx_compl_prod;
4341
4342         /* Produce the work item */
4343         elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4344         if (elem_ix == BCM_RING_FULL) {
4345                 DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
4346                 ASSERT(0);
4347                 return;
4348         }
4349
4350         elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
4351         *elem = pktid;
4352
4353         smp_wmb();
4354
4355         /* Sync WR index to consumer if the SYNC threshold has been reached */
4356         if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
4357                 bcm_workq_prod_sync(workq);
4358                 prot->rx_compl_prod_sync = 0;
4359         }
4360
4361         DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
4362                 __FUNCTION__, pktid, prot->rx_compl_prod_sync));
4363
4364 #endif /* DHD_LB_RXC */
4365
4366
4367         if (prot->rxbufpost >= rxcnt) {
4368                 prot->rxbufpost -= rxcnt;
4369         } else {
4370                 /* ASSERT(0); */
4371                 prot->rxbufpost = 0;
4372         }
4373
4374 #if !defined(DHD_LB_RXC)
4375         if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
4376                 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4377         }
4378 #endif /* !DHD_LB_RXC */
4379 }
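/*
 * Worked example of the repost threshold above (values are illustrative and
 * assume max_rxbufpost was initialized to DEFAULT_RX_BUFFERS_TO_POST = 256):
 * with RXBUFPOST_THRESHOLD = 32, once rxbufpost falls to 256 - 32 = 224 or
 * below, dhd_msgbuf_rxbuf_post() is invoked to refill the rx post ring back
 * toward max_rxbufpost.
 */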
4380
4381 /* called before an ioctl is sent to the dongle */
4382 static void
4383 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
4384 {
4385         dhd_prot_t *prot = dhd->prot;
4386
4387         if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
4388                 int slen = 0;
4389                 pcie_bus_tput_params_t *tput_params;
4390
4391                 slen = strlen("pcie_bus_tput") + 1;
4392                 tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
4393                 bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
4394                         sizeof(tput_params->host_buf_addr));
4395                 tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
4396         }
4397 }
4398
4399
4400 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
4401 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
4402 {
4403         int ret = -1;
4404         uint8 action;
4405
4406         if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
4407                 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
4408                 goto done;
4409         }
4410
4411         if (dhd->busstate == DHD_BUS_SUSPEND) {
4412                 DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
4413                 goto done;
4414         }
4415
4416         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4417
4418         if (ioc->cmd == WLC_SET_PM) {
4419                 DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf));
4420         }
4421
4422         ASSERT(len <= WLC_IOCTL_MAXLEN);
4423
4424         if (len > WLC_IOCTL_MAXLEN) {
4425                 goto done;
4426         }
4427
4428         action = ioc->set;
4429
4430         dhd_prot_wlioctl_intercept(dhd, ioc, buf);
4431
4432         if (action & WL_IOCTL_ACTION_SET) {
4433                 ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
4434         } else {
4435                 ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
4436                 if (ret > 0) {
4437                         ioc->used = ret;
4438                 }
4439         }
4440
4441         /* Too many programs assume ioctl() returns 0 on success */
4442         if (ret >= 0) {
4443                 ret = 0;
4444         } else {
4445                 DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
4446                 dhd->dongle_error = ret;
4447         }
4448
4449         if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
4450                 /* Intercept the wme_dp ioctl here */
4451                 if (!strcmp(buf, "wme_dp")) {
4452                         int slen, val = 0;
4453
4454                         slen = strlen("wme_dp") + 1;
4455                         if (len >= (int)(slen + sizeof(int))) {
4456                                 bcopy(((char *)buf + slen), &val, sizeof(int));
4457                         }
4458                         dhd->wme_dp = (uint8) ltoh32(val);
4459                 }
4460
4461         }
4462
4463 done:
4464         return ret;
4465
4466 } /* dhd_prot_ioctl */
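/*
 * Illustrative sketch only (not compiled): issuing a query-style ioctl through
 * dhd_prot_ioctl(). The wl_ioctl_t field usage mirrors the function above;
 * the caller, command choice and buffer handling are assumptions made purely
 * for illustration.
 */
#if 0
static int example_query_ioctl(dhd_pub_t *dhd, int ifidx, char *buf, uint buflen)
{
        wl_ioctl_t ioc;

        memset(&ioc, 0, sizeof(ioc));
        ioc.cmd = WLC_GET_MAGIC;        /* any GET-style command */
        ioc.buf = buf;
        ioc.len = buflen;
        ioc.set = 0;                    /* query: WL_IOCTL_ACTION_SET not set */

        /* Returns 0 on success; ioc.used then holds the response length */
        return dhd_prot_ioctl(dhd, ifidx, &ioc, buf, (int)buflen);
}
#endif /* 0 */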
4467
4468 /** test / loopback */
4469
4470 int
4471 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
4472 {
4473         unsigned long flags;
4474         dhd_prot_t *prot = dhd->prot;
4475         uint16 alloced = 0;
4476
4477         ioct_reqst_hdr_t *ioct_rqst;
4478
4479         uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
4480         uint16 msglen = len + hdrlen;
4481         msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4482
4483         msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
4484         msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
4485
4486         DHD_GENERAL_LOCK(dhd, flags);
4487
4488         ioct_rqst = (ioct_reqst_hdr_t *)
4489                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4490
4491         if (ioct_rqst == NULL) {
4492                 DHD_GENERAL_UNLOCK(dhd, flags);
4493                 return 0;
4494         }
4495
4496         {
4497                 uint8 *ptr;
4498                 uint16 i;
4499
4500                 ptr = (uint8 *)ioct_rqst;
4501                 for (i = 0; i < msglen; i++) {
4502                         ptr[i] = i % 256;
4503                 }
4504         }
4505
4506         /* Common msg buf hdr */
4507         ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4508         ring->seqnum++;
4509
4510         ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
4511         ioct_rqst->msg.if_id = 0;
4512
4513         bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
4514
4515         /* update ring's WR index and ring doorbell to dongle */
4516         dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
4517         DHD_GENERAL_UNLOCK(dhd, flags);
4518
4519         return 0;
4520 }
4521
4522 /** test / loopback */
4523 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
4524 {
4525         if (dmaxfer == NULL) {
4526                 return;
4527         }
4528
4529         dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
4530         dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
4531 }
4532
4533 /** test / loopback */
4534 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
4535         uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
4536 {
4537         uint i;
4538         if (!dmaxfer) {
4539                 return BCME_ERROR;
4540         }
4541
4542         /* First free up existing buffers */
4543         dmaxfer_free_dmaaddr(dhd, dmaxfer);
4544
4545         if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
4546                 return BCME_NOMEM;
4547         }
4548
4549         if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
4550                 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
4551                 return BCME_NOMEM;
4552         }
4553
4554         dmaxfer->len = len;
4555
4556         /* Populate source with a pattern */
4557         for (i = 0; i < dmaxfer->len; i++) {
4558                 ((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
4559         }
4560         OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
4561
4562         dmaxfer->srcdelay = srcdelay;
4563         dmaxfer->destdelay = destdelay;
4564
4565         return BCME_OK;
4566 } /* dmaxfer_prepare_dmaaddr */
4567
4568 static void
4569 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
4570 {
4571         dhd_prot_t *prot = dhd->prot;
4572
4573         OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
4574         if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
4575                 if (memcmp(prot->dmaxfer.srcmem.va,
4576                         prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
4577                         bcm_print_bytes("XFER SRC: ",
4578                             prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
4579                         bcm_print_bytes("XFER DST: ",
4580                             prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
4581                 } else {
4582                         DHD_INFO(("DMA successful\n"));
4583                 }
4584         }
4585         dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
4586         dhd->prot->dmaxfer.in_progress = FALSE;
4587 }
4588
4589 /** Test functionality.
4590  * Transfers bytes from host to dongle and to host again using DMA
4591  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
4592  * by a spinlock.
4593  */
4594 int
4595 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
4596 {
4597         unsigned long flags;
4598         int ret = BCME_OK;
4599         dhd_prot_t *prot = dhd->prot;
4600         pcie_dma_xfer_params_t *dmap;
4601         uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
4602         uint16 alloced = 0;
4603         msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4604
4605         if (prot->dmaxfer.in_progress) {
4606                 DHD_ERROR(("DMA is in progress...\n"));
4607                 return ret;
4608         }
4609
4610         prot->dmaxfer.in_progress = TRUE;
4611         if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
4612                 &prot->dmaxfer)) != BCME_OK) {
4613                 prot->dmaxfer.in_progress = FALSE;
4614                 return ret;
4615         }
4616
4617         DHD_GENERAL_LOCK(dhd, flags);
4618
4619         dmap = (pcie_dma_xfer_params_t *)
4620                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4621
4622         if (dmap == NULL) {
4623                 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
4624                 prot->dmaxfer.in_progress = FALSE;
4625                 DHD_GENERAL_UNLOCK(dhd, flags);
4626                 return BCME_NOMEM;
4627         }
4628
4629         /* Common msg buf hdr */
4630         dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
4631         dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
4632         dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4633         ring->seqnum++;
4634
4635         dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
4636         dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
4637         dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
4638         dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
4639         dmap->xfer_len = htol32(prot->dmaxfer.len);
4640         dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
4641         dmap->destdelay = htol32(prot->dmaxfer.destdelay);
4642
4643         /* update ring's WR index and ring doorbell to dongle */
4644         dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
4645         DHD_GENERAL_UNLOCK(dhd, flags);
4646
4647         DHD_ERROR(("DMA Started...\n"));
4648
4649         return BCME_OK;
4650 } /* dhdmsgbuf_dmaxfer_req */
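/*
 * Illustrative sketch only (not compiled): kicking off the loopback DMA test
 * described above. The 4KB length and zero delays are arbitrary illustration
 * values; completion is reported asynchronously via
 * dhd_msgbuf_dmaxfer_process() when the dongle responds.
 */
#if 0
static void example_dmaxfer_loopback(dhd_pub_t *dhd)
{
        /* Ask the dongle to DMA 4096 bytes from srcmem to dstmem, no delays */
        if (dhdmsgbuf_dmaxfer_req(dhd, 4096, 0, 0) != BCME_OK) {
                DHD_ERROR(("dmaxfer request could not be posted\n"));
        }
}
#endif /* 0 */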
4651
4652 /** Called in the process of submitting an ioctl to the dongle */
4653 static int
4654 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
4655 {
4656         int ret = 0;
4657
4658         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4659
4660         /* Respond "bcmerror" and "bcmerrorstr" with local cache */
4661         if (cmd == WLC_GET_VAR && buf)
4662         {
4663                 if (!strcmp((char *)buf, "bcmerrorstr"))
4664                 {
4665                         strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
4666                         goto done;
4667                 }
4668                 else if (!strcmp((char *)buf, "bcmerror"))
4669                 {
4670                         *(int *)buf = dhd->dongle_error;
4671                         goto done;
4672                 }
4673         }
4674
4675         ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
4676
4677         DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
4678                 action, ifidx, cmd, len));
4679
4680         /* wait for IOCTL completion message from dongle and get first fragment */
4681         ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
4682
4683 done:
4684         return ret;
4685 }
4686
4687 /**
4688  * Waits for IOCTL completion message from the dongle, copies this into caller
4689  * provided parameter 'buf'.
4690  */
4691 static int
4692 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
4693 {
4694         dhd_prot_t *prot = dhd->prot;
4695         int timeleft;
4696         unsigned long flags;
4697         int ret = 0;
4698
4699         DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4700
4701         if (dhd->dongle_reset) {
4702                 ret = -EIO;
4703                 goto out;
4704         }
4705
4706         if (prot->cur_ioctlresp_bufs_posted) {
4707                 prot->cur_ioctlresp_bufs_posted--;
4708         }
4709
4710         dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
4711
4712         timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
4713         if (timeleft == 0) {
4714                 dhd->rxcnt_timeout++;
4715                 dhd->rx_ctlerrs++;
4716                 DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
4717                         "trans_id %d state %d busstate=%d ioctl_received=%d\n",
4718                         __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
4719                         prot->ioctl_trans_id, prot->ioctl_state,
4720                         dhd->busstate, prot->ioctl_received));
4721
4722                 dhd_prot_debug_info_print(dhd);
4723
4724 #ifdef DHD_FW_COREDUMP
4725                 /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */
4726                 if (dhd->memdump_enabled && !dhd->dongle_trap_occured) {
4727                         /* collect core dump */
4728                         dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
4729                         dhd_bus_mem_dump(dhd);
4730                 }
4731 #endif /* DHD_FW_COREDUMP */
4732                 if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
4733 #ifdef SUPPORT_LINKDOWN_RECOVERY
4734 #ifdef CONFIG_ARCH_MSM
4735                         dhd->bus->no_cfg_restore = 1;
4736 #endif /* CONFIG_ARCH_MSM */
4737 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4738                         DHD_ERROR(("%s: timeout > MAX_CNTL_RX_TIMEOUT\n", __FUNCTION__));
4739                 }
4740                 ret = -ETIMEDOUT;
4741                 goto out;
4742         } else {
4743                 if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
4744                         DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
4745                                 __FUNCTION__, prot->ioctl_received));
4746                         ret = -ECONNABORTED;
4747                         goto out;
4748                 }
4749                 dhd->rxcnt_timeout = 0;
4750                 dhd->rx_ctlpkts++;
4751                 DHD_CTL(("%s: ioctl resp resumed, got %d\n",
4752                         __FUNCTION__, prot->ioctl_resplen));
4753         }
4754
4755         if (dhd->dongle_trap_occured) {
4756 #ifdef SUPPORT_LINKDOWN_RECOVERY
4757 #ifdef CONFIG_ARCH_MSM
4758                 dhd->bus->no_cfg_restore = 1;
4759 #endif /* CONFIG_ARCH_MSM */
4760 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4761                 DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__));
4762                 ret = -EREMOTEIO;
4763                 goto out;
4764         }
4765
4766         if (dhd->prot->ioctl_resplen > len) {
4767                 dhd->prot->ioctl_resplen = (uint16)len;
4768         }
4769         if (buf) {
4770                 bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
4771         }
4772
4773         ret = (int)(dhd->prot->ioctl_status);
4774 out:
4775         DHD_GENERAL_LOCK(dhd, flags);
4776         dhd->prot->ioctl_state = 0;
4777         dhd->prot->ioctl_resplen = 0;
4778         dhd->prot->ioctl_received = IOCTL_WAIT;
4779         dhd->prot->curr_ioctl_cmd = 0;
4780         DHD_GENERAL_UNLOCK(dhd, flags);
4781
4782         return ret;
4783 } /* dhd_msgbuf_wait_ioctl_cmplt */
4784
4785 static int
4786 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
4787 {
4788         int ret = 0;
4789
4790         DHD_TRACE(("%s: Enter \n", __FUNCTION__));
4791
4792         if (dhd->busstate == DHD_BUS_DOWN) {
4793                 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
4794                 return -EIO;
4795         }
4796
4797         /* don't talk to the dongle if fw is about to be reloaded */
4798         if (dhd->hang_was_sent) {
4799                 DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
4800                         __FUNCTION__));
4801                 return -EIO;
4802         }
4803
4804         /* Fill up msgbuf for ioctl req */
4805         ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
4806
4807         DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
4808                 action, ifidx, cmd, len));
4809
4810         ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
4811
4812         return ret;
4813 }
4814
4815 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
4816 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
4817 {
4818         return 0;
4819 }
4820
4821 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
4822 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
4823         void *params, int plen, void *arg, int len, bool set)
4824 {
4825         return BCME_UNSUPPORTED;
4826 }
4827
4828 /** Add prot dump output to a buffer */
4829 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
4830 {
4831
4832 #if defined(PCIE_D2H_SYNC)
4833         if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
4834                 bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
4835         else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
4836                 bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
4837         else
4838                 bcm_bprintf(b, "\nd2h_sync: NONE:");
4839         bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
4840                 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
4841 #endif  /* PCIE_D2H_SYNC */
4842
4843         bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
4844                 DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support),
4845                 DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support),
4846                 dhd->prot->rw_index_sz);
4847 }
4848
4849 /* Update local copy of dongle statistics */
4850 void dhd_prot_dstats(dhd_pub_t *dhd)
4851 {
4852         return;
4853 }
4854
4855 /** Called by upper DHD layer */
4856 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
4857         uint reorder_info_len, void **pkt, uint32 *free_buf_count)
4858 {
4859         return 0;
4860 }
4861
4862 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
4863 int
4864 dhd_post_dummy_msg(dhd_pub_t *dhd)
4865 {
4866         unsigned long flags;
4867         hostevent_hdr_t *hevent = NULL;
4868         uint16 alloced = 0;
4869
4870         dhd_prot_t *prot = dhd->prot;
4871         msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4872
4873         DHD_GENERAL_LOCK(dhd, flags);
4874
4875         hevent = (hostevent_hdr_t *)
4876                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4877
4878         if (hevent == NULL) {
4879                 DHD_GENERAL_UNLOCK(dhd, flags);
4880                 return -1;
4881         }
4882
4883         /* CMN msg header */
4884         hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4885         ring->seqnum++;
4886         hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
4887         hevent->msg.if_id = 0;
4888
4889         /* Event payload */
4890         hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
4891
4892         /* Since we fill the data directly into the bufptr obtained from
4893          * the msgbuf ring, we can call write_complete directly.
4894          */
4895         dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
4896         DHD_GENERAL_UNLOCK(dhd, flags);
4897
4898         return 0;
4899 }
4900
4901 /**
4902  * If exactly_nitems is true, this function will allocate space for nitems or fail
4903  * If exactly_nitems is false, this function will allocate space for nitems or less
4904  */
4905 static void * BCMFASTPATH
4906 dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
4907         uint16 nitems, uint16 * alloced, bool exactly_nitems)
4908 {
4909         void * ret_buf;
4910
4911         /* Alloc space for nitems in the ring */
4912         ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
4913
4914         if (ret_buf == NULL) {
4915                 /* if alloc failed, refresh the cached read index and retry */
4916                 if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
4917                         ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
4918                 } else {
4919                         dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
4920                 }
4921
4922                 /* Try allocating once more */
4923                 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
4924
4925                 if (ret_buf == NULL) {
4926                         DHD_INFO(("%s: Ring space not available  \n", ring->name));
4927                         return NULL;
4928                 }
4929         }
4930
4931         /* Return alloced space */
4932         return ret_buf;
4933 }
4934
4935 /**
4936  * Non-inline ioctl request.
4937  * First form an ioctl request in the circular buffer, as per the ioctptr_reqst_hdr_t header.
4938  * A separate request buffer is then formed, with a 4-byte common header added in front;
4939  * the buf contents from the parent function are copied into the remaining section of this buffer.
4940  */
4941 static int
4942 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
4943 {
4944         dhd_prot_t *prot = dhd->prot;
4945         ioctl_req_msg_t *ioct_rqst;
4946         void * ioct_buf;        /* For ioctl payload */
4947         uint16  rqstlen, resplen;
4948         unsigned long flags;
4949         uint16 alloced = 0;
4950         msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4951
4952         rqstlen = len;
4953         resplen = len;
4954
4955         /* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including hdrs,
4956          * since an 8K dongle buffer allocation fails. DHD does not pass separate
4957          * input and output buffer lengths, so assume the input length never
4958          * exceeds 1.5K. */
4959         rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
4960
4961         DHD_GENERAL_LOCK(dhd, flags);
4962
4963         if (prot->ioctl_state) {
4964                 DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
4965                 DHD_GENERAL_UNLOCK(dhd, flags);
4966                 return BCME_BUSY;
4967         } else {
4968                 prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
4969         }
4970
4971         /* Request for cbuf space */
4972         ioct_rqst = (ioctl_req_msg_t*)
4973                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4974         if (ioct_rqst == NULL) {
4975                 DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
4976                 prot->ioctl_state = 0;
4977                 prot->curr_ioctl_cmd = 0;
4978                 prot->ioctl_received = IOCTL_WAIT;
4979                 DHD_GENERAL_UNLOCK(dhd, flags);
4980                 return -1;
4981         }
4982
4983         /* Common msg buf hdr */
4984         ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
4985         ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
4986         ioct_rqst->cmn_hdr.flags = 0;
4987         ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
4988         ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4989         ring->seqnum++;
4990
4991         ioct_rqst->cmd = htol32(cmd);
4992         prot->curr_ioctl_cmd = cmd;
4993         ioct_rqst->output_buf_len = htol16(resplen);
4994         prot->ioctl_trans_id++;
4995         ioct_rqst->trans_id = prot->ioctl_trans_id;
4996
4997         /* populate ioctl buffer info */
4998         ioct_rqst->input_buf_len = htol16(rqstlen);
4999         ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
5000         ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
5001         /* copy ioct payload */
5002         ioct_buf = (void *) prot->ioctbuf.va;
5003
5004         if (buf) {
5005                 memcpy(ioct_buf, buf, len);
5006         }
5007
5008         OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
5009
5010         if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
5011                 DHD_ERROR(("host ioct address unaligned !!!!! \n"));
5012         }
5013
5014         DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
5015                 ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
5016                 ioct_rqst->trans_id));
5017
5018         /* update ring's WR index and ring doorbell to dongle */
5019         dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
5020         DHD_GENERAL_UNLOCK(dhd, flags);
5021
5022         return 0;
5023 } /* dhd_fillup_ioct_reqst */
5024
5025
5026 /**
5027  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
5028  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
5029  * information is posted to the dongle.
5030  *
5031  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
5032  * each flowring in pool of flowrings.
5033  *
5034  * returns BCME_OK=0 on success
5035  * returns non-zero negative error value on failure.
5036  */
5037 static int
5038 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
5039         uint16 max_items, uint16 item_len, uint16 ringid)
5040 {
5041         int dma_buf_alloced = BCME_NOMEM;
5042         uint32 dma_buf_len = max_items * item_len;
5043         dhd_prot_t *prot = dhd->prot;
5044
5045         ASSERT(ring);
5046         ASSERT(name);
5047         ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
5048
5049         /* Init name */
5050         strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
5051         ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
5052
5053         ring->idx = ringid;
5054
5055         ring->max_items = max_items;
5056         ring->item_len = item_len;
5057
5058         /* A contiguous space may be reserved for all flowrings */
5059         if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) {
5060                 /* Carve out from the contiguous DMA-able flowring buffer */
5061                 uint16 flowid;
5062                 uint32 base_offset;
5063
5064                 dhd_dma_buf_t *dma_buf = &ring->dma_buf;
5065                 dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
5066
5067                 flowid = DHD_RINGID_TO_FLOWID(ringid);
5068                 base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
5069
5070                 ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
5071
5072                 dma_buf->len = dma_buf_len;
5073                 dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
5074                 PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
5075                 PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
5076
5077                 /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
5078                 ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
5079
5080                 dma_buf->dmah   = rsv_buf->dmah;
5081                 dma_buf->secdma = rsv_buf->secdma;
5082
5083                 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
5084         } else {
5085                 /* Allocate a dhd_dma_buf */
5086                 dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
5087                 if (dma_buf_alloced != BCME_OK) {
5088                         return BCME_NOMEM;
5089                 }
5090         }
5091
5092         /* CAUTION: Save ring::base_addr in little endian format! */
5093         dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
5094
5095 #ifdef BCM_SECURE_DMA
5096         if (SECURE_DMA_ENAB(prot->osh)) {
5097                 ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
5098                 if (ring->dma_buf.secdma == NULL) {
5099                         goto free_dma_buf;
5100                 }
5101         }
5102 #endif /* BCM_SECURE_DMA */
5103
5104         DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
5105                 "ring start %p buf phys addr  %x:%x \n",
5106                 ring->name, ring->max_items, ring->item_len,
5107                 dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
5108                 ltoh32(ring->base_addr.low_addr)));
5109
5110         return BCME_OK;
5111
5112 #ifdef BCM_SECURE_DMA
5113 free_dma_buf:
5114         if (dma_buf_alloced == BCME_OK) {
5115                 dhd_dma_buf_free(dhd, &ring->dma_buf);
5116         }
5117 #endif /* BCM_SECURE_DMA */
5118
5119         return BCME_NOMEM;
5120
5121 } /* dhd_prot_ring_attach */
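/*
 * Worked example of the flowring carve-out above (values are illustrative):
 * each flowring needs dma_buf_len = max_items * item_len bytes, so with
 * H2DRING_TXPOST_MAX_ITEM items of H2DRING_TXPOST_ITEMSIZE bytes each, the
 * ring for flowid = BCMPCIE_H2D_COMMON_MSGRINGS + 2 is carved out at
 * base_offset = 2 * dma_buf_len within the contiguous flowrings_dma_buf.
 */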
5122
5123
5124 /**
5125  * dhd_prot_ring_init - Post the common ring information to dongle.
5126  *
5127  * Used only for common rings.
5128  *
5129  * The flowrings information is passed via the create flowring control message
5130  * (tx_flowring_create_request_t) sent over the H2D control submission common
5131  * ring.
5132  */
5133 static void
5134 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5135 {
5136         ring->wr = 0;
5137         ring->rd = 0;
5138         ring->curr_rd = 0;
5139
5140         /* CAUTION: ring::base_addr already in Little Endian */
5141         dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
5142                 sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
5143         dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
5144                 sizeof(uint16), RING_MAX_ITEMS, ring->idx);
5145         dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
5146                 sizeof(uint16), RING_ITEM_LEN, ring->idx);
5147
5148         dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
5149                 sizeof(uint16), RING_WR_UPD, ring->idx);
5150         dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
5151                 sizeof(uint16), RING_RD_UPD, ring->idx);
5152
5153         /* ring inited */
5154         ring->inited = TRUE;
5155
5156 } /* dhd_prot_ring_init */
5157
5158
5159 /**
5160  * dhd_prot_ring_reset - bzero a ring's DMA-able buffer and cache flush.
5161  * Reset WR and RD indices to 0.
5162  */
5163 static void
5164 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5165 {
5166         DHD_TRACE(("%s\n", __FUNCTION__));
5167
5168         dhd_dma_buf_reset(dhd, &ring->dma_buf);
5169
5170         ring->rd = ring->wr = 0;
5171         ring->curr_rd = 0;
5172 }
5173
5174
5175 /**
5176  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
5177  * hanging off the msgbuf_ring.
5178  */
5179 static void
5180 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5181 {
5182         dhd_prot_t *prot = dhd->prot;
5183         ASSERT(ring);
5184
5185         ring->inited = FALSE;
5186         /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
5187
5188 #ifdef BCM_SECURE_DMA
5189         if (SECURE_DMA_ENAB(prot->osh)) {
5190                 SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
5191                 if (ring->dma_buf.secdma) {
5192                         MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
5193                 }
5194                 ring->dma_buf.secdma = NULL;
5195         }
5196 #endif /* BCM_SECURE_DMA */
5197
5198         /* If the DMA-able buffer was carved out of a pre-reserved contiguous
5199          * memory, then simply stop using it.
5200          */
5201         if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) {
5202                 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
5203                 memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
5204         } else {
5205                 dhd_dma_buf_free(dhd, &ring->dma_buf);
5206         }
5207
5208 } /* dhd_prot_ring_detach */
5209
5210
5211 /*
5212  * +----------------------------------------------------------------------------
5213  * Flowring Pool
5214  *
5215  * Unlike common rings, which are attached very early on (dhd_prot_attach),
5216  * flowrings are dynamically instantiated. Moreover, flowrings may require a
5217  * larger DMA-able buffer. To avoid issues with fragmented cache coherent
5218  * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
5219  * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
5220  *
5221  * Each DMA-able buffer may be allocated independently, or may be carved out
5222  * of a single large contiguous region that is registered with the protocol
5223  * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
5224  * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
5225  *
5226  * No flowring pool action is performed in dhd_prot_attach(), as the number
5227  * of h2d rings is not yet known.
5228  *
5229  * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
5230  * determine the number of flowrings required, and a pool of msgbuf_rings are
5231  * allocated and a DMA-able buffer (carved or allocated) is attached.
5232  * See: dhd_prot_flowrings_pool_attach()
5233  *
5234  * A flowring msgbuf_ring object may be fetched from this pool during flowring
5235  * creation, using the flowid. Likewise, flowrings may be freed back into the
5236  * pool on flowring deletion.
5237  * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
5238  *
5239  * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
5240  * are detached (returned back to the carved region or freed), and the pool of
5241  * msgbuf_ring and any objects allocated against it are freed.
5242  * See: dhd_prot_flowrings_pool_detach()
5243  *
5244  * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
5245  * state as-if upon an attach. All DMA-able buffers are retained.
5246  * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
5247  * pool attach will notice that the pool persists and continue to use it. This
5248  * will avoid the case of a fragmented DMA-able region.
5249  *
5250  * +----------------------------------------------------------------------------
5251  */
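/*
 * Illustrative lifecycle sketch only (not compiled), summarizing the pool
 * usage described above. The exact calling contexts (flowring create/delete
 * handlers, protocol attach/detach) are assumptions for illustration.
 */
#if 0
static void example_flowring_pool_lifecycle(dhd_pub_t *dhd, uint16 flowid)
{
        msgbuf_ring_t *flow_ring;

        /* dhd_prot_init() time: allocate the pool and DMA-able buffers once */
        dhd_prot_flowrings_pool_attach(dhd);

        /* flowring create: borrow the preallocated ring for this flowid */
        flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flowid);

        /* ... tx traffic is posted into flow_ring via dhd_prot_txdata() ... */

        /* flowring delete: return the ring; its DMA-able buffer is retained */
        dhd_prot_flowrings_pool_release(dhd, flowid, flow_ring);

        /* dhd_prot_detach() time: free the DMA-able buffers and the pool */
        dhd_prot_flowrings_pool_detach(dhd);
}
#endif /* 0 */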
5252
5253 /* Fetch number of H2D flowrings given the total number of h2d rings */
5254 #define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \
5255         ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS)
5256
5257 /* Conversion of a flowid to a flowring pool index */
5258 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
5259         ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
5260
5261 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
5262 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
5263         (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid)
5264
5265 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
5266 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \
5267         for ((flowid) = DHD_FLOWRING_START_FLOWID, \
5268                  (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
5269                  (flowid) < (prot)->h2d_rings_total; \
5270                  (flowid)++, (ring)++)
5271
5272 /**
5273  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
5274  *
5275  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
5276  * Dongle includes common rings when it advertizes the number of H2D rings.
5277  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
5278  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
5279  *
5280  * dhd_prot_ring_attach is invoked to perform the actual initialization and
5281  * attaching the DMA-able buffer.
5282  *
5283  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
5284  * initialized msgbuf_ring_t object.
5285  *
5286  * returns BCME_OK=0 on success
5287  * returns non-zero negative error value on failure.
5288  */
5289 static int
5290 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
5291 {
5292         uint16 flowid;
5293         msgbuf_ring_t *ring;
5294         uint16 h2d_flowrings_total; /* exclude H2D common rings */
5295         dhd_prot_t *prot = dhd->prot;
5296         char ring_name[RING_NAME_MAX_LENGTH];
5297
5298         if (prot->h2d_flowrings_pool != NULL) {
5299                 return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
5300         }
5301
5302         ASSERT(prot->h2d_rings_total == 0);
5303
5304         /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
5305         prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
5306
5307         if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
5308                 DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
5309                         __FUNCTION__, prot->h2d_rings_total));
5310                 return BCME_ERROR;
5311         }
5312
5313         /* Subtract number of H2D common rings, to determine number of flowrings */
5314         h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
5315
5316         DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
5317
5318         /* Allocate pool of msgbuf_ring_t objects for all flowrings */
5319         prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
5320                 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
5321
5322         if (prot->h2d_flowrings_pool == NULL) {
5323                 DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
5324                         __FUNCTION__, h2d_flowrings_total));
5325                 goto fail;
5326         }
5327
5328         /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
5329         FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
5330                 snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
5331                 ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
5332                 if (dhd_prot_ring_attach(dhd, ring, ring_name,
5333                         H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
5334                         DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
5335                         goto attach_fail;
5336                 }
5337         }
5338
5339         return BCME_OK;
5340
5341 attach_fail:
5342         dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
5343
5344 fail:
5345         prot->h2d_rings_total = 0;
5346         return BCME_NOMEM;
5347
5348 } /* dhd_prot_flowrings_pool_attach */
5349
5350
5351 /**
5352  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
5353  * Invokes dhd_prot_ring_reset to perform the actual reset.
5354  *
5355  * The DMA-able buffer is not freed during reset and neither is the flowring
5356  * pool freed.
5357  *
5358  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
5359  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
5360  * from a previous flowring pool instantiation will be reused.
5361  *
5362  * This will avoid a fragmented DMA-able memory condition, if multiple
5363  * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
5364  * cycle.
5365  */
5366 static void
5367 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
5368 {
5369         uint16 flowid;
5370         msgbuf_ring_t *ring;
5371         dhd_prot_t *prot = dhd->prot;
5372
5373         if (prot->h2d_flowrings_pool == NULL) {
5374                 ASSERT(prot->h2d_rings_total == 0);
5375                 return;
5376         }
5377
5378         /* Reset each flowring in the flowring pool */
5379         FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
5380                 dhd_prot_ring_reset(dhd, ring);
5381                 ring->inited = FALSE;
5382         }
5383
5384         /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
5385 }
5386
5387
5388 /**
5389  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
5390  * DMA-able buffers for flowrings.
5391  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
5392  * de-initialization of each msgbuf_ring_t.
5393  */
5394 static void
5395 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
5396 {
5397         int flowid;
5398         msgbuf_ring_t *ring;
5399         int h2d_flowrings_total; /* exclude H2D common rings */
5400         dhd_prot_t *prot = dhd->prot;
5401
5402         if (prot->h2d_flowrings_pool == NULL) {
5403                 ASSERT(prot->h2d_rings_total == 0);
5404                 return;
5405         }
5406
5407         /* Detach the DMA-able buffer for each flowring in the flowring pool */
5408         FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
5409                 dhd_prot_ring_detach(dhd, ring);
5410         }
5411
5412         h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
5413
5414         MFREE(prot->osh, prot->h2d_flowrings_pool,
5415                 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
5416
5417         prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
5418         prot->h2d_rings_total = 0;
5419
5420 } /* dhd_prot_flowrings_pool_detach */
5421
5422
5423 /**
5424  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
5425  * msgbuf_ring from the flowring pool, and assign it.
5426  *
5427  * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
5428  * ring information to the dongle, a flowring's information is passed via a
5429  * flowring create control message.
5430  *
5431  * Only the ring state (WR, RD) index are initialized.
5432  */
5433 static msgbuf_ring_t *
5434 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
5435 {
5436         msgbuf_ring_t *ring;
5437         dhd_prot_t *prot = dhd->prot;
5438
5439         ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
5440         ASSERT(flowid < prot->h2d_rings_total);
5441         ASSERT(prot->h2d_flowrings_pool != NULL);
5442
5443         ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
5444
5445         /* ASSERT flow_ring->inited == FALSE */
5446
5447         ring->wr = 0;
5448         ring->rd = 0;
5449         ring->curr_rd = 0;
5450         ring->inited = TRUE;
5451
5452         return ring;
5453 }
5454
5455
5456 /**
5457  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
5458  * msgbuf_ring back to the flow_ring pool.
5459  */
5460 void
5461 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
5462 {
5463         msgbuf_ring_t *ring;
5464         dhd_prot_t *prot = dhd->prot;
5465
5466         ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
5467         ASSERT(flowid < prot->h2d_rings_total);
5468         ASSERT(prot->h2d_flowrings_pool != NULL);
5469
5470         ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
5471
5472         ASSERT(ring == (msgbuf_ring_t*)flow_ring);
5473         /* ASSERT flow_ring->inited == TRUE */
5474
5475         (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
5476
5477         ring->wr = 0;
5478         ring->rd = 0;
5479         ring->inited = FALSE;
5480
5481         ring->curr_rd = 0;
5482 }
5483
5484
5485 /* Assumes only one index is updated at a time.
5486  * If exactly_nitems is true, this function allocates space for nitems or fails;
5487  * the exception is at the ring wrap point, where fewer than nitems may be returned to avoid a hang.
5488  * If exactly_nitems is false, this function allocates space for nitems or less. */
5489 static void *BCMFASTPATH
5490 dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
5491         bool exactly_nitems)
5492 {
5493         void *ret_ptr = NULL;
5494         uint16 ring_avail_cnt;
5495
5496         ASSERT(nitems <= ring->max_items);
5497
5498         ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
5499
5500         if ((ring_avail_cnt == 0) ||
5501                (exactly_nitems && (ring_avail_cnt < nitems) &&
5502                ((ring->max_items - ring->wr) >= nitems))) {
5503                 DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
5504                         ring->name, nitems, ring->wr, ring->rd));
5505                 return NULL;
5506         }
5507         *alloced = MIN(nitems, ring_avail_cnt);
5508
5509         /* Return next available space */
5510         ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
5511
5512         /* Update write index */
5513         if ((ring->wr + *alloced) == ring->max_items) {
5514                 ring->wr = 0;
5515         } else if ((ring->wr + *alloced) < ring->max_items) {
5516                 ring->wr += *alloced;
5517         } else {
5518                 /* Should never hit this */
5519                 ASSERT(0);
5520                 return NULL;
5521         }
5522
5523         return ret_ptr;
5524 } /* dhd_prot_get_ring_space */
5525
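/*
 * Illustrative sketch (editor's note, not driver code): free space in a
 * circular ring with independent read/write indices, assuming the classic
 * convention that one slot is kept empty so that rd == wr means "empty".
 * The driver itself uses the CHECK_WRITE_SPACE() macro defined earlier in
 * this file, which may differ in detail from this stand-in.
 *
 *   static uint16 example_write_space(uint16 rd, uint16 wr, uint16 depth)
 *   {
 *           if (wr >= rd)
 *                   return (uint16)(depth - wr + rd - 1);  // free tail plus free head, minus the reserved slot
 *           return (uint16)(rd - wr - 1);                  // writer has wrapped, reader has not
 *   }
 */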
5526
5527 /**
5528  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
5529  * new messages in an H2D ring. The messages are flushed from cache prior to
5530  * posting the new WR index. The new WR index will be updated in the DMA index
5531  * array or directly in the dongle's ring state memory.
5532  * A PCIE doorbell will be generated to wake up the dongle.
5533  */
5534 static void BCMFASTPATH
5535 dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
5536         uint16 nitems)
5537 {
5538         dhd_prot_t *prot = dhd->prot;
5539
5540         /* cache flush */
5541         OSL_CACHE_FLUSH(p, ring->item_len * nitems);
5542
5543         if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
5544                 dhd_prot_dma_indx_set(dhd, ring->wr,
5545                         H2D_DMA_INDX_WR_UPD, ring->idx);
5546         } else {
5547                 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
5548                         sizeof(uint16), RING_WR_UPD, ring->idx);
5549         }
5550
5551         /* raise h2d interrupt */
5552         prot->mb_ring_fn(dhd->bus, ring->wr);
5553 }
5554
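/*
 * Illustrative sketch (editor's note, not driver code): the produce/publish
 * ordering a caller in this file follows when submitting to an H2D ring. The
 * "fill the work item" step is schematic; everything else names functions
 * defined in this file.
 *
 *   DHD_GENERAL_LOCK(dhd, flags);
 *   msg = dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
 *   if (msg != NULL) {
 *           ... fill the work item at msg ...
 *           dhd_prot_ring_write_complete(dhd, ring, msg, 1);  // cache flush, publish WR, ring doorbell
 *   }
 *   DHD_GENERAL_UNLOCK(dhd, flags);
 */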
5555
5556 /**
5557  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
5558  * from a D2H ring. The new RD index will be updated in the DMA Index array or
5559  * directly in dongle's ring state memory.
5560  */
5561 static void
5562 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
5563 {
5564         /* update read index */
5565         /* If DMA'ing of ring indices is supported,
5566          * update the RD index in host memory;
5567          * otherwise write it directly to the dongle's TCM.
5568          */
5569         if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
5570                 dhd_prot_dma_indx_set(dhd, ring->rd,
5571                         D2H_DMA_INDX_RD_UPD, ring->idx);
5572         } else {
5573                 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
5574                         sizeof(uint16), RING_RD_UPD, ring->idx);
5575         }
5576 }
5577
5578
5579 /**
5580  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
5581  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
5582  * See dhd_prot_dma_indx_init()
5583  */
5584 static void
5585 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
5586 {
5587         uint8 *ptr;
5588         uint16 offset;
5589         dhd_prot_t *prot = dhd->prot;
5590
5591         switch (type) {
5592                 case H2D_DMA_INDX_WR_UPD:
5593                         ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
5594                         offset = DHD_H2D_RING_OFFSET(ringid);
5595                         break;
5596
5597                 case D2H_DMA_INDX_RD_UPD:
5598                         ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
5599                         offset = DHD_D2H_RING_OFFSET(ringid);
5600                         break;
5601
5602                 default:
5603                         DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
5604                                 __FUNCTION__));
5605                         return;
5606         }
5607
5608         ASSERT(prot->rw_index_sz != 0);
5609         ptr += offset * prot->rw_index_sz;
5610
5611         *(uint16*)ptr = htol16(new_index);
5612
5613         OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
5614
5615         DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
5616                 __FUNCTION__, new_index, type, ringid, ptr, offset));
5617
5618 } /* dhd_prot_dma_indx_set */
5619
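/*
 * Illustrative sketch (editor's note, not driver code): addressing into the
 * host-resident DMA index array. Each ring owns one rw_index_sz-wide slot;
 * its byte offset is the ring's offset (DHD_H2D_RING_OFFSET() or
 * DHD_D2H_RING_OFFSET()) multiplied by rw_index_sz, which is exactly what
 * dhd_prot_dma_indx_set()/_get() compute. The numbers below are hypothetical.
 *
 *   slot_va = (uint8 *)dma_buf->va + (ring_offset * rw_index_sz);
 *
 *   e.g. rw_index_sz = 4, ring_offset = 3  ->  slot_va = va + 12
 */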
5620
5621 /**
5622  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
5623  * array.
5624  * Dongle DMAes an entire array to host memory (if the feature is enabled).
5625  * See dhd_prot_dma_indx_init()
5626  */
5627 static uint16
5628 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
5629 {
5630         uint8 *ptr;
5631         uint16 data;
5632         uint16 offset;
5633         dhd_prot_t *prot = dhd->prot;
5634
5635         switch (type) {
5636                 case H2D_DMA_INDX_WR_UPD:
5637                         ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
5638                         offset = DHD_H2D_RING_OFFSET(ringid);
5639                         break;
5640
5641                 case H2D_DMA_INDX_RD_UPD:
5642                         ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
5643                         offset = DHD_H2D_RING_OFFSET(ringid);
5644                         break;
5645
5646                 case D2H_DMA_INDX_WR_UPD:
5647                         ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
5648                         offset = DHD_D2H_RING_OFFSET(ringid);
5649                         break;
5650
5651                 case D2H_DMA_INDX_RD_UPD:
5652                         ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
5653                         offset = DHD_D2H_RING_OFFSET(ringid);
5654                         break;
5655
5656                 default:
5657                         DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
5658                                 __FUNCTION__));
5659                         return 0;
5660         }
5661
5662         ASSERT(prot->rw_index_sz != 0);
5663         ptr += offset * prot->rw_index_sz;
5664
5665         OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
5666
5667         data = LTOH16(*((uint16*)ptr));
5668
5669         DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
5670                 __FUNCTION__, data, type, ringid, ptr, offset));
5671
5672         return (data);
5673
5674 } /* dhd_prot_dma_indx_get */
5675
5676 /**
5677  * An array of DMA read/write indices, describing the state of the host rings, can be maintained
5678  * either in host memory or in device memory, depending on preprocessor options. Depending on these
5679  * options, this function is called during driver initialization. It reserves and initializes
5680  * blocks of DMA'able host memory, each containing an array of DMA read or DMA write indices. The
5681  * physical addresses of these host memory blocks are communicated to the dongle later on; by
5682  * reading this host memory, the dongle learns about the state of the host rings.
5683  */
5684
5685 static INLINE int
5686 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
5687         dhd_dma_buf_t *dma_buf, uint32 bufsz)
5688 {
5689         int rc;
5690
5691         if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
5692                 return BCME_OK;
5693
5694         rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
5695
5696         return rc;
5697 }
5698
5699 int
5700 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
5701 {
5702         uint32 bufsz;
5703         dhd_prot_t *prot = dhd->prot;
5704         dhd_dma_buf_t *dma_buf;
5705
5706         if (prot == NULL) {
5707                 DHD_ERROR(("prot is not inited\n"));
5708                 return BCME_ERROR;
5709         }
5710
5711         /* Dongle advertises 2B or 4B RW index size */
5712         ASSERT(rw_index_sz != 0);
5713         prot->rw_index_sz = rw_index_sz;
5714
5715         bufsz = rw_index_sz * length;
5716
5717         switch (type) {
5718                 case H2D_DMA_INDX_WR_BUF:
5719                         dma_buf = &prot->h2d_dma_indx_wr_buf;
5720                         if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5721                                 goto ret_no_mem;
5722                         }
5723                         DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
5724                                 dma_buf->len, rw_index_sz, length));
5725                         break;
5726
5727                 case H2D_DMA_INDX_RD_BUF:
5728                         dma_buf = &prot->h2d_dma_indx_rd_buf;
5729                         if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5730                                 goto ret_no_mem;
5731                         }
5732                         DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
5733                                 dma_buf->len, rw_index_sz, length));
5734                         break;
5735
5736                 case D2H_DMA_INDX_WR_BUF:
5737                         dma_buf = &prot->d2h_dma_indx_wr_buf;
5738                         if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5739                                 goto ret_no_mem;
5740                         }
5741                         DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
5742                                 dma_buf->len, rw_index_sz, length));
5743                         break;
5744
5745                 case D2H_DMA_INDX_RD_BUF:
5746                         dma_buf = &prot->d2h_dma_indx_rd_buf;
5747                         if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5748                                 goto ret_no_mem;
5749                         }
5750                         DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
5751                                 dma_buf->len, rw_index_sz, length));
5752                         break;
5753
5754                 default:
5755                         DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
5756                         return BCME_BADOPTION;
5757         }
5758
5759         return BCME_OK;
5760
5761 ret_no_mem:
5762         DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
5763                 __FUNCTION__, type, bufsz));
5764         return BCME_NOMEM;
5765
5766 } /* dhd_prot_dma_indx_init */
5767
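/*
 * Illustrative sketch (editor's note, not driver code): how the four index
 * arrays could be sized and created during protocol bring-up. The counts used
 * here (h2d_total, d2h_total) are hypothetical placeholders; the real call
 * sites and ring counts live in the protocol init path outside this excerpt.
 *
 *   bufsz (bytes) = rw_index_sz * number of rings covered by the array
 *
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_WR_BUF, h2d_total);
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_RD_BUF, h2d_total);
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_WR_BUF, d2h_total);
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_RD_BUF, d2h_total);
 */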
5768
5769 /**
5770  * Called when checking for 'completion' messages from the dongle. Returns the next host buffer to
5771  * read from, or NULL if there are no more messages to read.
5772  */
5773 static uint8*
5774 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
5775 {
5776         uint16 wr;
5777         uint16 rd;
5778         uint16 depth;
5779         uint16 items;
5780         void  *read_addr = NULL; /* address of next msg to be read in ring */
5781         uint16 d2h_wr = 0;
5782
5783         DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
5784                 __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
5785                 (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
5786
5787         /* Remember the read index in a local variable.
5788          * ring->rd gets updated at the end of this function,
5789          * so without this copy the exact read index from which
5790          * the message was read could not be reported.
5791          */
5792         ring->curr_rd = ring->rd;
5793
5794         /* update write pointer */
5795         if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
5796                 /* DMAing write/read indices supported */
5797                 d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
5798                 ring->wr = d2h_wr;
5799         } else {
5800                 dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
5801         }
5802
5803         wr = ring->wr;
5804         rd = ring->rd;
5805         depth = ring->max_items;
5806
5807         /* check for avail space, in number of ring items */
5808         items = READ_AVAIL_SPACE(wr, rd, depth);
5809         if (items == 0) {
5810                 return NULL;
5811         }
5812
5813         ASSERT(items < ring->max_items);
5814
5815         /*
5816          * Note that in some builds ASSERT translates to just a printk,
5817          * so even if the condition above were hit we would never halt.
5818          * dhd_prot_process_msgtype could then spin in a long loop if this
5819          * happens, hence the explicit check below.
5820          */
5821         if (items >= ring->max_items) {
5822                 DHD_ERROR(("\r\n======================= \r\n"));
5823                 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
5824                         __FUNCTION__, ring, ring->name, ring->max_items, items));
5825                 DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
5826                 DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n",
5827                         dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack));
5828                 DHD_ERROR(("\r\n======================= \r\n"));
5829
5830                 *available_len = 0;
5831                 return NULL;
5832         }
5833
5834         /* if space is available, calculate address to be read */
5835         read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
5836
5837         /* update read pointer */
5838         if ((ring->rd + items) >= ring->max_items) {
5839                 ring->rd = 0;
5840         } else {
5841                 ring->rd += items;
5842         }
5843
5844         ASSERT(ring->rd < ring->max_items);
5845
5846         /* convert items to bytes : available_len must be 32bits */
5847         *available_len = (uint32)(items * ring->item_len);
5848
5849         OSL_CACHE_INV(read_addr, *available_len);
5850
5851         /* return read address */
5852         return read_addr;
5853
5854 } /* dhd_prot_get_read_addr */
5855
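/*
 * Illustrative sketch (editor's note, not driver code): items available to the
 * consumer given snapshot rd/wr indices. The driver uses the READ_AVAIL_SPACE()
 * macro defined earlier in this file; this stand-in assumes the usual circular
 * ring convention and may differ in detail. When the writer has wrapped, only
 * the contiguous tail (depth - rd) is reported; the wrapped portion is picked
 * up on the next call, which is why dhd_prot_get_read_addr() resets ring->rd
 * to 0 in that case.
 *
 *   static uint16 example_read_avail(uint16 wr, uint16 rd, uint16 depth)
 *   {
 *           return (wr >= rd) ? (uint16)(wr - rd) : (uint16)(depth - rd);
 *   }
 */
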
5856 /** Creates a flow ring and informs dongle of this event */
5857 int
5858 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
5859 {
5860         tx_flowring_create_request_t *flow_create_rqst;
5861         msgbuf_ring_t *flow_ring;
5862         dhd_prot_t *prot = dhd->prot;
5863         unsigned long flags;
5864         uint16 alloced = 0;
5865         msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
5866
5867         /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
5868         flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
5869         if (flow_ring == NULL) {
5870                 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
5871                         __FUNCTION__, flow_ring_node->flowid));
5872                 return BCME_NOMEM;
5873         }
5874
5875         DHD_GENERAL_LOCK(dhd, flags);
5876
5877         /* Request for ctrl_ring buffer space */
5878         flow_create_rqst = (tx_flowring_create_request_t *)
5879                 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
5880
5881         if (flow_create_rqst == NULL) {
5882                 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
5883                 DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
5884                         __FUNCTION__, flow_ring_node->flowid));
5885                 DHD_GENERAL_UNLOCK(dhd, flags);
5886                 return BCME_NOMEM;
5887         }
5888
5889         flow_ring_node->prot_info = (void *)flow_ring;
5890
5891         /* Common msg buf hdr */
5892         flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
5893         flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
5894         flow_create_rqst->msg.request_id = htol32(0); /* TBD */
5895
5896         flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
5897         ctrl_ring->seqnum++;
5898
5899         /* Update flow create message */
5900         flow_create_rqst->tid = flow_ring_node->flow_info.tid;
5901         flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
5902         memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
5903         memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
5904         /* CAUTION: ring::base_addr already in Little Endian */
5905         flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
5906         flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
5907         flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
5908         flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
5909         DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
5910                 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
5911                 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
5912                 flow_ring_node->flow_info.ifindex));
5913
5914         /* Update the flow_ring's WRITE index */
5915         if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
5916                 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
5917                         H2D_DMA_INDX_WR_UPD, flow_ring->idx);
5918         } else {
5919                 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
5920                         sizeof(uint16), RING_WR_UPD, flow_ring->idx);
5921         }
5922
5923         /* update control subn ring's WR index and ring doorbell to dongle */
5924         dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
5925
5926         DHD_GENERAL_UNLOCK(dhd, flags);
5927
5928         return BCME_OK;
5929 } /* dhd_prot_flow_ring_create */
5930
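/*
 * Illustrative sketch (editor's note, not driver code): the flowring create
 * handshake as seen from the host. The request above goes out on the H2D
 * control submit ring; the dongle answers on the D2H control completion ring
 * with MSG_TYPE_FLOW_RING_CREATE_CMPLT, which is routed to the handler below.
 *
 *   dhd_prot_flow_ring_create()                  ---> flow ring create request
 *   dhd_prot_flow_ring_create_response_process() <--- MSG_TYPE_FLOW_RING_CREATE_CMPLT
 *           -> dhd_bus_flow_ring_create_response(bus, flow_ring_id, status)
 */
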
5931 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
5932 static void
5933 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
5934 {
5935         tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
5936
5937         DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
5938                 ltoh16(flow_create_resp->cmplt.status),
5939                 ltoh16(flow_create_resp->cmplt.flow_ring_id)));
5940
5941         dhd_bus_flow_ring_create_response(dhd->bus,
5942                 ltoh16(flow_create_resp->cmplt.flow_ring_id),
5943                 ltoh16(flow_create_resp->cmplt.status));
5944 }
5945
5946 /** called on e.g. flow ring delete */
5947 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
5948 {
5949         msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
5950         dhd_prot_ring_detach(dhd, flow_ring);
5951         DHD_INFO(("%s: Cleaning up flow ring\n", __FUNCTION__));
5952 }
5953
5954 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
5955         struct bcmstrbuf *strbuf, const char * fmt)
5956 {
5957         const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n";
5958         msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
5959         uint16 rd, wr;
5960         uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
5961
5962         if (fmt == NULL) {
5963                 fmt = default_fmt;
5964         }
5965         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
5966         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
5967         bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
5968                 ltoh32(flow_ring->base_addr.high_addr),
5969                 ltoh32(flow_ring->base_addr.low_addr), dma_buf_len);
5970 }
5971
5972 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
5973 {
5974         dhd_prot_t *prot = dhd->prot;
5975         bcm_bprintf(strbuf, "CtrlPost: ");
5976         dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL);
5977         bcm_bprintf(strbuf, "CtrlCpl: ");
5978         dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL);
5979
5980         bcm_bprintf(strbuf, "RxPost: ");
5981         bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost);
5982         dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL);
5983         bcm_bprintf(strbuf, "RxCpl: ");
5984         dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL);
5985
5986         bcm_bprintf(strbuf, "TxCpl: ");
5987         dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL);
5988         bcm_bprintf(strbuf, "active_tx_count %d  pktidmap_avail %d\n",
5989                 dhd->prot->active_tx_count,
5990                 DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle));
5991 }
5992
5993 int
5994 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
5995 {
5996         tx_flowring_delete_request_t *flow_delete_rqst;
5997         dhd_prot_t *prot = dhd->prot;
5998         unsigned long flags;
5999         uint16 alloced = 0;
6000         msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6001
6002         DHD_GENERAL_LOCK(dhd, flags);
6003
6004         /* Request for ring buffer space */
6005         flow_delete_rqst = (tx_flowring_delete_request_t *)
6006                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6007
6008         if (flow_delete_rqst == NULL) {
6009                 DHD_GENERAL_UNLOCK(dhd, flags);
6010                 DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
6011                 return BCME_NOMEM;
6012         }
6013
6014         /* Common msg buf hdr */
6015         flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
6016         flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
6017         flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
6018
6019         flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6020         ring->seqnum++;
6021
6022         /* Update Delete info */
6023         flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
6024         flow_delete_rqst->reason = htol16(BCME_OK);
6025
6026         DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
6027                 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
6028                 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
6029                 flow_ring_node->flow_info.ifindex));
6030
6031         /* update ring's WR index and ring doorbell to dongle */
6032         dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
6033         DHD_GENERAL_UNLOCK(dhd, flags);
6034
6035         return BCME_OK;
6036 }
6037
6038 static void
6039 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
6040 {
6041         tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
6042
6043         DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
6044                 flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
6045
6046         dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
6047                 flow_delete_resp->cmplt.status);
6048 }
6049
6050 int
6051 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
6052 {
6053         tx_flowring_flush_request_t *flow_flush_rqst;
6054         dhd_prot_t *prot = dhd->prot;
6055         unsigned long flags;
6056         uint16 alloced = 0;
6057         msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6058
6059         DHD_GENERAL_LOCK(dhd, flags);
6060
6061         /* Request for ring buffer space */
6062         flow_flush_rqst = (tx_flowring_flush_request_t *)
6063                 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6064         if (flow_flush_rqst == NULL) {
6065                 DHD_GENERAL_UNLOCK(dhd, flags);
6066                 DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
6067                 return BCME_NOMEM;
6068         }
6069
6070         /* Common msg buf hdr */
6071         flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
6072         flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
6073         flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
6074
6075         flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6076         ring->seqnum++;
6077
6078         flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
6079         flow_flush_rqst->reason = htol16(BCME_OK);
6080
6081         DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
6082
6083         /* update ring's WR index and ring doorbell to dongle */
6084         dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
6085         DHD_GENERAL_UNLOCK(dhd, flags);
6086
6087         return BCME_OK;
6088 } /* dhd_prot_flow_ring_flush */
6089
6090 static void
6091 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
6092 {
6093         tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
6094
6095         DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
6096                 flow_flush_resp->cmplt.status));
6097
6098         dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
6099                 flow_flush_resp->cmplt.status);
6100 }
6101
6102 /**
6103  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
6104  * doorbell information is transferred to dongle via the d2h ring config control
6105  * message.
6106  */
6107 void
6108 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
6109 {
6110 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
6111         uint16 ring_idx;
6112         uint8 *msg_next;
6113         void *msg_start;
6114         uint16 alloced = 0;
6115         unsigned long flags;
6116         dhd_prot_t *prot = dhd->prot;
6117         ring_config_req_t *ring_config_req;
6118         bcmpcie_soft_doorbell_t *soft_doorbell;
6119         msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
6120         const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
6121
6122         /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
6123         DHD_GENERAL_LOCK(dhd, flags);
6124         msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
6125
6126         if (msg_start == NULL) {
6127                 DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
6128                         __FUNCTION__, d2h_rings));
6129                 DHD_GENERAL_UNLOCK(dhd, flags);
6130                 return;
6131         }
6132
6133         msg_next = (uint8*)msg_start;
6134
6135         for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
6136
6137                 /* position the ring_config_req into the ctrl subm ring */
6138                 ring_config_req = (ring_config_req_t *)msg_next;
6139
6140                 /* Common msg header */
6141                 ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
6142                 ring_config_req->msg.if_id = 0;
6143                 ring_config_req->msg.flags = 0;
6144
6145                 ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
6146                 ctrl_ring->seqnum++;
6147
6148                 ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
6149
6150                 /* Ring Config subtype and d2h ring_id */
6151                 ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
6152                 ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
6153
6154                 /* Host soft doorbell configuration */
6155                 soft_doorbell = &prot->soft_doorbell[ring_idx];
6156
6157                 ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
6158                 ring_config_req->soft_doorbell.haddr.high =
6159                         htol32(soft_doorbell->haddr.high);
6160                 ring_config_req->soft_doorbell.haddr.low =
6161                         htol32(soft_doorbell->haddr.low);
6162                 ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
6163                 ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
6164
6165                 DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
6166                         __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
6167                         ring_config_req->soft_doorbell.haddr.low,
6168                         ring_config_req->soft_doorbell.value));
6169
6170                 msg_next = msg_next + ctrl_ring->item_len;
6171         }
6172
6173         /* update control subn ring's WR index and ring doorbell to dongle */
6174         dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
6175         DHD_GENERAL_UNLOCK(dhd, flags);
6176 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
6177 }
6178
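/*
 * Illustrative sketch (editor's note, not driver code): how a host platform
 * might populate one D2H soft doorbell entry before this request is built.
 * The addresses and values are hypothetical placeholders; only the field
 * names, taken from the code above, are real. The exact semantics of the
 * items/msecs coalescing fields are defined by the dongle interface.
 *
 *   bcmpcie_soft_doorbell_t *db = &prot->soft_doorbell[d2h_ring_idx];
 *
 *   db->haddr.low  = 0x0;   // low 32 bits of the wakeup register's host address
 *   db->haddr.high = 0x0;   // high 32 bits (0 for a 32-bit host address)
 *   db->value      = 0x0;   // value the dongle writes to that address
 *   db->items      = 0;     // coalescing threshold in items, if used
 *   db->msecs      = 0;     // coalescing interval in msecs, if used
 */
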
6179 static void
6180 dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg)
6181 {
6182         DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
6183                 __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
6184                 ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
6185 }
6186
6187 int
6188 dhd_prot_debug_info_print(dhd_pub_t *dhd)
6189 {
6190         dhd_prot_t *prot = dhd->prot;
6191         msgbuf_ring_t *ring;
6192         uint16 rd, wr;
6193         uint32 intstatus = 0;
6194         uint32 intmask = 0;
6195         uint32 mbintstatus = 0;
6196         uint32 d2h_mb_data = 0;
6197         uint32 dma_buf_len;
6198
6199         DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
6200
6201         ring = &prot->h2dring_ctrl_subn;
6202         dma_buf_len = ring->max_items * ring->item_len;
6203         DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
6204                 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6205                 ltoh32(ring->base_addr.low_addr), dma_buf_len));
6206         DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
6207         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
6208         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
6209         DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
6210
6211         ring = &prot->d2hring_ctrl_cpln;
6212         dma_buf_len = ring->max_items * ring->item_len;
6213         DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
6214                 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6215                 ltoh32(ring->base_addr.low_addr), dma_buf_len));
6216         DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
6217         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
6218         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
6219         DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
6220         DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum));
6221
6222         intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6223         intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
6224         mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
6225         dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
6226
6227         DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
6228         DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
6229                 intstatus, intmask, mbintstatus));
6230         DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask));
6231
6232         return 0;
6233 }
6234
6235 int
6236 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
6237 {
6238         uint32 *ptr;
6239         uint32 value;
6240         uint32 i;
6241         uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
6242
6243         OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
6244                 dhd->prot->d2h_dma_indx_wr_buf.len);
6245
6246         ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
6247
6248         bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
6249
6250         bcm_bprintf(b, "\nRPTR block H2D common rings, %p\n", ptr);
6251         value = ltoh32(*ptr);
6252         bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
6253         ptr++;
6254         value = ltoh32(*ptr);
6255         bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
6256
6257         ptr++;
6258         bcm_bprintf(b, "RPTR block Flow rings, %p\n", ptr);
6259         for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
6260                 value = ltoh32(*ptr);
6261                 bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
6262                 ptr++;
6263         }
6264
6265         OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
6266                 dhd->prot->h2d_dma_indx_rd_buf.len);
6267
6268         ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
6269
6270         bcm_bprintf(b, "\nWPTR block D2H common rings, %p\n", ptr);
6271         value = ltoh32(*ptr);
6272         bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
6273         ptr++;
6274         value = ltoh32(*ptr);
6275         bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
6276         ptr++;
6277         value = ltoh32(*ptr);
6278         bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
6279
6280         return 0;
6281 }
6282
6283 uint32
6284 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
6285 {
6286         dhd_prot_t *prot = dhd->prot;
6287 #if DHD_DBG_SHOW_METADATA
6288         prot->metadata_dbg = val;
6289 #endif
6290         return (uint32)prot->metadata_dbg;
6291 }
6292
6293 uint32
6294 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
6295 {
6296         dhd_prot_t *prot = dhd->prot;
6297         return (uint32)prot->metadata_dbg;
6298 }
6299
6300 uint32
6301 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
6302 {
6303         dhd_prot_t *prot = dhd->prot;
6304         if (rx)
6305                 prot->rx_metadata_offset = (uint16)val;
6306         else
6307                 prot->tx_metadata_offset = (uint16)val;
6308         return dhd_prot_metadatalen_get(dhd, rx);
6309 }
6310
6311 uint32
6312 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
6313 {
6314         dhd_prot_t *prot = dhd->prot;
6315         if (rx)
6316                 return prot->rx_metadata_offset;
6317         else
6318                 return prot->tx_metadata_offset;
6319 }
6320
6321 /** optimization to write "n" tx items at a time to ring */
6322 uint32
6323 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
6324 {
6325         dhd_prot_t *prot = dhd->prot;
6326         if (set)
6327                 prot->txp_threshold = (uint16)val;
6328         val = prot->txp_threshold;
6329         return val;
6330 }
6331
6332 #ifdef DHD_RX_CHAINING
6333
6334 static INLINE void BCMFASTPATH
6335 dhd_rxchain_reset(rxchain_info_t *rxchain)
6336 {
6337         rxchain->pkt_count = 0;
6338 }
6339
6340 static void BCMFASTPATH
6341 dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
6342 {
6343         uint8 *eh;
6344         uint8 prio;
6345         dhd_prot_t *prot = dhd->prot;
6346         rxchain_info_t *rxchain = &prot->rxchain;
6347
6348         ASSERT(!PKTISCHAINED(pkt));
6349         ASSERT(PKTCLINK(pkt) == NULL);
6350         ASSERT(PKTCGETATTR(pkt) == 0);
6351
6352         eh = PKTDATA(dhd->osh, pkt);
6353         prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
6354
6355         if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
6356                 rxchain->h_da, rxchain->h_prio))) {
6357                 /* Different flow - First release the existing chain */
6358                 dhd_rxchain_commit(dhd);
6359         }
6360
6361         /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
6362         /* so that the chain can be handed off to CTF bridge as is. */
6363         if (rxchain->pkt_count == 0) {
6364                 /* First packet in chain */
6365                 rxchain->pkthead = rxchain->pkttail = pkt;
6366
6367                 /* Keep a copy of ptr to ether_da, ether_sa and prio */
6368                 rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
6369                 rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
6370                 rxchain->h_prio = prio;
6371                 rxchain->ifidx = ifidx;
6372                 rxchain->pkt_count++;
6373         } else {
6374                 /* Same flow - keep chaining */
6375                 PKTSETCLINK(rxchain->pkttail, pkt);
6376                 rxchain->pkttail = pkt;
6377                 rxchain->pkt_count++;
6378         }
6379
6380         if ((!ETHER_ISMULTI(rxchain->h_da)) &&
6381                 ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
6382                 (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
6383                 PKTSETCHAINED(dhd->osh, pkt);
6384                 PKTCINCRCNT(rxchain->pkthead);
6385                 PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
6386         } else {
6387                 dhd_rxchain_commit(dhd);
6388                 return;
6389         }
6390
6391         /* If we have hit the max chain length, dispatch the chain and reset */
6392         if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
6393                 dhd_rxchain_commit(dhd);
6394         }
6395 }
6396
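/*
 * Illustrative sketch (editor's note, not driver code): the per-flow key that
 * decides whether a received packet may be appended to the current chain.
 * PKT_CTF_CHAINABLE() is a macro provided elsewhere and may check more than
 * this; the stand-in below only compares the fields the chaining code above
 * actually tracks (DA, SA, priority, interface index).
 *
 *   static bool example_same_flow(const rxchain_info_t *rxchain,
 *           const struct ether_header *eh, uint8 prio, uint ifidx)
 *   {
 *           return (rxchain->ifidx == ifidx) &&
 *                  (rxchain->h_prio == prio) &&
 *                  (memcmp(rxchain->h_da, eh->ether_dhost, ETHER_ADDR_LEN) == 0) &&
 *                  (memcmp(rxchain->h_sa, eh->ether_shost, ETHER_ADDR_LEN) == 0);
 *   }
 */
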
6397 static void BCMFASTPATH
6398 dhd_rxchain_commit(dhd_pub_t *dhd)
6399 {
6400         dhd_prot_t *prot = dhd->prot;
6401         rxchain_info_t *rxchain = &prot->rxchain;
6402
6403         if (rxchain->pkt_count == 0)
6404                 return;
6405
6406         /* Release the packets to dhd_linux */
6407         dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
6408
6409         /* Reset the chain */
6410         dhd_rxchain_reset(rxchain);
6411 }
6412
6413 #endif /* DHD_RX_CHAINING */