2 * @file definition of host message ring functionality
3 * Provides type definitions and function prototypes used to link the
4 * DHD OS, bus, and protocol modules.
6 * Copyright (C) 1999-2016, Broadcom Corporation
8 * Unless you and Broadcom execute a separate written software license
9 * agreement governing use of this software, this software is licensed to you
10 * under the terms of the GNU General Public License version 2 (the "GPL"),
11 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12 * following added to such license:
14 * As a special exception, the copyright holders of this software give you
15 * permission to link this software with independent modules, and to copy and
16 * distribute the resulting executable under terms of your choice, provided that
17 * you also meet, for each linked independent module, the terms and conditions of
18 * the license of that module. An independent module is a module which is not
19 * derived from this software. The special exception does not apply to any
20 * modifications of the software.
22 * Notwithstanding the above, under no circumstances may you combine this
23 * software in any way with any other Broadcom software provided under a license
24 * other than the GPL, without Broadcom's express prior written consent.
27 * <<Broadcom-WL-IPTag/Open:>>
29 * $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $
37 #include <bcmmsgbuf.h>
38 #include <bcmendian.h>
40 #include <dngl_stats.h>
42 #include <dhd_proto.h>
50 #include <dhd_flowring.h>
52 #include <pcie_core.h>
57 #include <linux/cpu.h>
59 #define DHD_LB_WORKQ_SZ (8192)
60 #define DHD_LB_WORKQ_SYNC (16)
61 #define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
66 * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
67 * address where a value must be written. Host may also enable interrupt
68 * coalescing on this soft doorbell.
69 * Use Case: Hosts with network processors may register with the dongle the
70 * network processor's thread wakeup register and a value corresponding to the
71 * core/thread context. Dongle will issue a write transaction <address,value>
72 * to the PCIE RC, which will need to be routed to the mapped register space by
73 * the host.
75 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
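/*
 * Illustrative sketch (not part of the driver): from the dongle's point of
 * view, a soft doorbell is simply a posted 32bit PCIe write of <address,value>.
 * The hypothetical host-side receiver below shows what lands at the registered
 * address; the guard and names are assumptions for illustration only.
 */
#ifdef DHD_D2H_SOFT_DOORBELL_EXAMPLE /* hypothetical guard, never defined */
static void
example_np_thread_wakeup(volatile uint32 *wakeup_reg, uint32 thread_ctx_value)
{
	/* equivalent of the dongle-issued write transaction routed via the PCIE RC */
	*wakeup_reg = thread_ctx_value;
}
#endif /* DHD_D2H_SOFT_DOORBELL_EXAMPLE */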
77 /* Dependency Check */
78 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
79 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
80 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
82 #define RETRIES 2 /* # of retries to retrieve matching ioctl response */
84 #define DEFAULT_RX_BUFFERS_TO_POST 256
85 #define RXBUFPOST_THRESHOLD 32
86 #define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
88 #define DHD_STOP_QUEUE_THRESHOLD 200
89 #define DHD_START_QUEUE_THRESHOLD 100
91 #define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
92 #define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
93 #define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE)
95 /* flags for ioctl pending status */
96 #define MSGBUF_IOCTL_ACK_PENDING (1<<0)
97 #define MSGBUF_IOCTL_RESP_PENDING (1<<1)
99 #define DMA_ALIGN_LEN 4
101 #define DMA_D2H_SCRATCH_BUF_LEN 8
102 #define DMA_XFER_LEN_LIMIT 0x400000
104 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
106 #define DHD_FLOWRING_MAX_EVENTBUF_POST 8
107 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
109 #define DHD_PROT_FUNCS 37
111 /* Length of buffer in host for bus throughput measurement */
112 #define DHD_BUS_TPUT_BUF_LEN 2048
114 #define TXP_FLUSH_NITEMS
116 /* optimization to write "n" tx items at a time to ring */
117 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
119 #define RING_NAME_MAX_LENGTH 24
122 struct msgbuf_ring; /* ring context for common and flow rings */
125 * PCIE D2H DMA Complete Sync Modes
127 * Firmware may interrupt the host before the D2H Mem2Mem DMA has completed into
128 * host system memory. A WAR using one of the following approaches is needed:
129 * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
130 * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
131 * and writes it in the last word of each work item. Each work item also
132 * carries a seqnum = sequence number % 253.
134 * 3. Read Barrier: Dongle performs a host memory read access prior to posting an
135 * interrupt, ensuring that the D2H data transfer has indeed completed.
136 * 4. Dongle DMAs all indices after producing items in the D2H ring, flushing
137 * ring contents before the indices.
139 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
140 * callback (see dhd_prot_d2h_sync_none) may be bound.
142 * Dongle advertises its host-side sync mechanism requirements.
144 #define PCIE_D2H_SYNC
146 #if defined(PCIE_D2H_SYNC)
147 #define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
148 #define PCIE_D2H_SYNC_NUM_OF_STEPS (3UL)
149 #define PCIE_D2H_SYNC_DELAY (50UL) /* in terms of usecs */
152 * Custom callback attached based upon the D2H DMA Sync mode advertised by the dongle.
154 * On success: return cmn_msg_hdr_t::msg_type
155 * On failure: return 0 (invalid msg_type)
157 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
158 volatile cmn_msg_hdr_t *msg, int msglen);
159 #endif /* PCIE_D2H_SYNC */
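/*
 * Usage sketch (hedged; simplified from the actual dispatch path, bounds
 * checks elided): before a D2H work item is handed to its handler, the bound
 * sync callback must confirm that the mem2mem DMA has fully landed. A return
 * of 0 (MSG_TYPE_INVALID) means the item was dropped and maps to the noop
 * entry of the handler table.
 *
 *   uint8 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);
 *   if (msg_type != MSG_TYPE_INVALID)
 *           table_lookup[msg_type](dhd, (void *)msg);
 */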
163 * +----------------------------------------------------------------------------
165 * RingIds and FlowIds are not equivalent, as ringids include D2H rings whereas
166 * flowids do not.
168 * Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
169 * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings.
171 * Here is a sample mapping (based on PCIE Full Dongle Rev5), where
172 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, and
173 * BCMPCIE_COMMON_MSGRINGS = 5, i.e. including the 3 D2H common rings.
175 * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
176 * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
178 * D2H Control Complete RingId = 2
179 * D2H Transmit Complete RingId = 3
180 * D2H Receive Complete RingId = 4
182 * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
183 * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
184 * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = 2 + (N-1) (Nth flowring)
186 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
187 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
189 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
190 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
191 * FlowId values would be in the range [2..133] and the corresponding
192 * RingId values would be in the range [5..136].
194 * The flowid allocator may choose to allocate FlowIds:
195 * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
196 * X# of uc flowids in consecutive ranges (per station Id), where X is the
197 * packet's access category (e.g. 4 uc flowids per station).
200 * When the DMA indices array feature is used, RingId=5, corresponding to the 0th
201 * FLOWRING, will actually use the FlowId as the index into the H2D DMA indices
202 * array, since the FlowId truly represents the index in the H2D DMA indices array.
204 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
205 * will represent the index in the D2H DMA indices array.
207 * +----------------------------------------------------------------------------
210 /* First TxPost Flowring Id */
211 #define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
213 /* Determine whether a ringid belongs to a TxPost flowring */
214 #define DHD_IS_FLOWRING(ringid) \
215 ((ringid) >= BCMPCIE_COMMON_MSGRINGS)
217 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
218 #define DHD_FLOWID_TO_RINGID(flowid) \
219 (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
221 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
222 #define DHD_RINGID_TO_FLOWID(ringid) \
223 (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
225 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
226 * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
227 * any array of H2D rings.
229 #define DHD_H2D_RING_OFFSET(ringid) \
230 ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
232 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
233 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
234 * any array of D2H rings.
236 #define DHD_D2H_RING_OFFSET(ringid) \
237 ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)
239 /* Convert a D2H DMA Indices Offset to a RingId */
240 #define DHD_D2H_RINGID(offset) \
241 ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
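/*
 * Worked example, using the sample mapping above
 * (BCMPCIE_H2D_COMMON_MSGRINGS = 2, BCMPCIE_COMMON_MSGRINGS = 5):
 *
 *   DHD_FLOWID_TO_RINGID(2) == 5   (1st TxPost flowring)
 *   DHD_RINGID_TO_FLOWID(6) == 3   (2nd TxPost flowring)
 *   DHD_H2D_RING_OFFSET(1)  == 1   (RxPost common ring: not a flowring)
 *   DHD_H2D_RING_OFFSET(5)  == 2   (0th flowring indexes by its FlowId)
 *   DHD_D2H_RING_OFFSET(3)  == 1   (D2H Tx Complete ring)
 *   DHD_D2H_RINGID(1)       == 3
 */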
244 #define DHD_DMAH_NULL ((void*)NULL)
247 * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
248 * buffer does not occupy the entire cacheline, and another object is placed
249 * following the DMA-able buffer, data corruption may occur if the DMA-able
250 * buffer is used to DMA into (e.g. D2H direction), when HW cache coherency
251 * is not available.
253 #if defined(L1_CACHE_BYTES)
254 #define DHD_DMA_PAD (L1_CACHE_BYTES)
256 #define DHD_DMA_PAD (128)
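/*
 * Padding rule sketch (mirrors dhd_dma_buf_alloc() below): one extra cacheline
 * is appended only when the requested length does not already end on a
 * DHD_DMA_PAD boundary. With DHD_DMA_PAD = 128:
 *
 *   buf_len = 2048  ->  dma_pad = 0    (2048 % 128 == 0, already aligned)
 *   buf_len = 2050  ->  dma_pad = 128  (allocation becomes 2178 bytes)
 */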
259 /* Used in loopback tests */
260 typedef struct dhd_dmaxfer {
261 dhd_dma_buf_t srcmem;
262 dhd_dma_buf_t dstmem;
270 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
271 * buffer, the WR and RD indices, ring parameters such as the max number of items
272 * and the length of each item, and other miscellaneous runtime state.
273 * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
274 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
275 * Ring parameters are conveyed to the dongle, which maintains its own peer end
276 * ring state. Depending on whether the DMA Indices feature is supported, the
277 * host will update the WR/RD index in the DMA indices array in host memory or
278 * directly in dongle memory.
280 typedef struct msgbuf_ring {
282 uint16 idx; /* ring id */
283 uint16 rd; /* read index */
284 uint16 curr_rd; /* read index for debug */
285 uint16 wr; /* write index */
286 uint16 max_items; /* maximum number of items in ring */
287 uint16 item_len; /* length of each item in the ring */
288 sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
289 dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
290 uint32 seqnum; /* next expected item's sequence number */
291 #ifdef TXP_FLUSH_NITEMS
293 /* # of messages on ring not yet announced to dongle */
294 uint16 pend_items_count;
295 #endif /* TXP_FLUSH_NITEMS */
296 uchar name[RING_NAME_MAX_LENGTH];
299 #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
300 #define DHD_RING_END_VA(ring) \
301 ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
302 (((ring)->max_items - 1) * (ring)->item_len))
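/*
 * Hypothetical helper (illustration only, not in the driver): the virtual
 * address of the i'th work item, for i in [0 .. max_items-1]. Note that
 * DHD_RING_END_VA() above is this expression evaluated at i == max_items - 1.
 */
#ifdef DHD_MSGBUF_RING_VA_EXAMPLE /* hypothetical guard, never defined */
static INLINE uint8 *
example_ring_item_va(msgbuf_ring_t *ring, uint16 i)
{
	return (uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)i * ring->item_len);
}
#endif /* DHD_MSGBUF_RING_VA_EXAMPLE */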
306 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
307 typedef struct dhd_prot {
308 osl_t *osh; /* OSL handle */
310 uint16 max_rxbufpost;
311 uint16 max_eventbufpost;
312 uint16 max_ioctlrespbufpost;
313 uint16 cur_event_bufs_posted;
314 uint16 cur_ioctlresp_bufs_posted;
316 /* Flow control mechanism based on active transmits pending */
317 uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
319 uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
321 /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
322 msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
323 msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
324 msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
325 msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
326 msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
328 msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
329 dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
330 uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
332 uint32 rx_dataoffset;
334 dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
336 /* ioctl related resources */
338 int16 ioctl_status; /* status returned from dongle */
339 uint16 ioctl_resplen;
340 dhd_ioctl_recieved_status_t ioctl_received;
342 dhd_dma_buf_t retbuf; /* For holding ioctl response */
343 dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
345 dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
347 /* DMA-able arrays for holding WR and RD indices */
348 uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
349 dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
350 dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
351 dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
352 dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
354 dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
356 dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
359 #if defined(PCIE_D2H_SYNC)
360 d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
361 ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
362 ulong d2h_sync_wait_tot; /* total wait loops */
363 #endif /* PCIE_D2H_SYNC */
365 dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
369 uint16 ioctl_trans_id;
370 void *pktid_map_handle; /* a pktid maps to a packet and its metadata */
372 void *pktid_map_handle_ioctl;
374 /* Applications/utilities can read tx and rx metadata using IOVARs */
375 uint16 rx_metadata_offset;
376 uint16 tx_metadata_offset;
379 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
380 /* Host's soft doorbell configuration */
381 bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
382 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
384 /* Work queues to be used by the producer and the consumer, and the threshold
385 * at which the WRITE index must be synced to the consumer's workq
387 #if defined(DHD_LB_TXC)
388 uint32 tx_compl_prod_sync ____cacheline_aligned;
389 bcm_workq_t tx_compl_prod, tx_compl_cons;
390 #endif /* DHD_LB_TXC */
391 #if defined(DHD_LB_RXC)
392 uint32 rx_compl_prod_sync ____cacheline_aligned;
393 bcm_workq_t rx_compl_prod, rx_compl_cons;
394 #endif /* DHD_LB_RXC */
398 /* Convert a dmaaddr_t to a base_addr with htol operations */
399 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
401 /* APIs for managing a DMA-able buffer */
402 static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
403 static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
404 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
405 static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
407 /* msgbuf ring management */
408 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
409 const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
410 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
411 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
412 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
414 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
415 static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
416 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
417 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
419 /* Fetch and Release a flowring msgbuf_ring from flowring pool */
420 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
422 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
424 /* Producer: Allocate space in a msgbuf ring */
425 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
426 uint16 nitems, uint16 *alloced, bool exactly_nitems);
427 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
428 uint16 *alloced, bool exactly_nitems);
430 /* Consumer: Determine the location where the next message may be consumed */
431 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
432 uint32 *available_len);
434 /* Producer (WR index update) or Consumer (RD index update) indication */
435 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
436 void *p, uint16 len);
437 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
439 /* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */
440 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
441 dhd_dma_buf_t *dma_buf, uint32 bufsz);
443 /* Set/Get a RD or WR index in the array of indices */
444 /* See also: dhd_prot_dma_indx_init() */
445 static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
447 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
449 /* Locate a packet given a pktid */
450 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
452 /* Locate a packet given a PktId and free it. */
453 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
455 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
456 void *buf, uint len, uint8 action);
457 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
458 void *buf, uint len, uint8 action);
459 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
460 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
461 void *buf, int ifidx);
463 /* Post buffers for Rx, control ioctl response and events */
464 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
465 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
466 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
467 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
468 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
470 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
472 /* D2H Message handling */
473 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
475 /* D2H Message handlers */
476 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
477 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
478 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
479 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
480 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
481 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
482 static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg);
483 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
485 /* Loopback test with dongle */
486 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
487 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
488 uint destdelay, dhd_dmaxfer_t *dma);
489 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
491 /* Flowring management communication with dongle */
492 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
493 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
494 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
496 /* Configure a soft doorbell per D2H ring */
497 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
498 static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg);
500 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
502 /** callback functions for messages generated by the dongle */
503 #define MSG_TYPE_INVALID 0
505 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
506 dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
507 dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
508 dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
510 dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
512 dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
514 dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
516 dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
518 dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
520 dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
522 dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
524 dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
526 dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
527 NULL, /* MSG_TYPE_FLOW_RING_RESUME */
528 NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
529 NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
530 NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
531 NULL, /* MSG_TYPE_INFO_BUF_POST */
532 NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
533 NULL, /* MSG_TYPE_H2D_RING_CREATE */
534 NULL, /* MSG_TYPE_D2H_RING_CREATE */
535 NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
536 NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
537 NULL, /* MSG_TYPE_H2D_RING_CONFIG */
538 NULL, /* MSG_TYPE_D2H_RING_CONFIG */
539 NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
540 dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
541 NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
542 NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */
546 #ifdef DHD_RX_CHAINING
548 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
549 (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
550 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
551 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
552 !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
553 ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
554 ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
555 (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \
556 dhd_l2_filter_chainable((dhd), (evh), (ifidx)))
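/*
 * In words: a received frame may be appended to the current rx chain only if
 * it is unicast (non-null, non-multicast DA), its DA/SA and priority match
 * those of the chain head, the hot bridge-cache and L2 filter checks pass,
 * and the ethertype is IPv4 or IPv6.
 */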
558 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
559 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
560 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
562 #define DHD_PKT_CTF_MAX_CHAIN_LEN 64
564 #endif /* DHD_RX_CHAINING */
566 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
568 #if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */
571 * D2H DMA completion callback handlers. Based on the mode advertised by the
572 * dongle through the PCIE shared region, the appropriate callback will be
573 * registered in the proto layer to be invoked prior to processing any message
574 * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
575 * does not require host participation, then a noop callback handler will be
576 * bound that simply returns the msg_type.
578 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring,
579 uint32 tries, uchar *msg, int msglen);
580 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
581 volatile cmn_msg_hdr_t *msg, int msglen);
582 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
583 volatile cmn_msg_hdr_t *msg, int msglen);
584 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
585 volatile cmn_msg_hdr_t *msg, int msglen);
586 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
588 void dhd_prot_collect_memdump(dhd_pub_t *dhd)
590 DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__));
591 #ifdef DHD_FW_COREDUMP
592 if (dhd->memdump_enabled) {
593 /* collect core dump */
594 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
595 dhd_bus_mem_dump(dhd);
597 #endif /* DHD_FW_COREDUMP */
598 #ifdef SUPPORT_LINKDOWN_RECOVERY
599 #ifdef CONFIG_ARCH_MSM
600 dhd->bus->no_cfg_restore = 1;
601 #endif /* CONFIG_ARCH_MSM */
602 dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
603 dhd_os_send_hang_message(dhd);
604 #endif /* SUPPORT_LINKDOWN_RECOVERY */
608 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
609 * not completed, a livelock condition occurs. The host averts this livelock by
610 * dropping the message and moving on to the next one. The dropped message can lead
611 * to a packet leak, or even to something disastrous in case the dropped
612 * message happens to be a control response.
613 * Here we only log this condition. One may choose to reboot the dongle instead.
617 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
618 uchar *msg, int msglen)
620 uint32 seqnum = ring->seqnum;
622 DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu> "
623 "dma_buf va<%p> msg<%p> curr_rd<%d>\n",
624 dhd, ring->name, seqnum, seqnum % D2H_EPOCH_MODULO, tries,
625 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
626 ring->dma_buf.va, msg, ring->curr_rd));
627 prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
628 dhd_dump_to_kernelog(dhd);
630 #ifdef DHD_FW_COREDUMP
631 if (dhd->memdump_enabled) {
632 /* collect core dump */
633 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
634 dhd_bus_mem_dump(dhd);
636 #endif /* DHD_FW_COREDUMP */
637 #ifdef SUPPORT_LINKDOWN_RECOVERY
638 #ifdef CONFIG_ARCH_MSM
639 dhd->bus->no_cfg_restore = 1;
640 #endif /* CONFIG_ARCH_MSM */
641 dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
642 dhd_os_send_hang_message(dhd);
643 #endif /* SUPPORT_LINKDOWN_RECOVERY */
647 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
648 * mode. Sequence number is always in the last word of a message.
650 static uint8 BCMFASTPATH
651 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
652 volatile cmn_msg_hdr_t *msg, int msglen)
655 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
656 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
657 volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
658 dhd_prot_t *prot = dhd->prot;
660 uint32 delay = PCIE_D2H_SYNC_DELAY;
661 uint32 total_tries = 0;
663 ASSERT(msglen == ring->item_len);
665 BCM_REFERENCE(delay);
667 * For retries we use a stepper algorithm.
668 * We see that every time the dongle comes out of the D3
669 * cold state, the first D2H mem2mem DMA takes more time to
670 * complete, leading to livelock issues.
672 * Case 1 - Apart from the host CPU, some other bus master is
673 * accessing the DDR port, probably a page close to the ring,
674 * so PCIE does not get a chance to update the memory.
675 * Solution - Increase the number of tries.
677 * Case 2 - The 50usec delay given by the host CPU is not
678 * sufficient for the PCIe RC to start its work.
679 * In this case the breathing time of 50usec given by
680 * the host CPU is not sufficient.
681 * Solution: Increase the delay in a stepper fashion.
682 * This is done to ensure that no
683 * unwanted extra delay is introduced in normal conditions.
685 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
686 for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
687 uint32 msg_seqnum = *marker;
688 if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma up to last word done */
689 ring->seqnum++; /* next expected sequence number */
693 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
695 if (total_tries > prot->d2h_sync_wait_max)
696 prot->d2h_sync_wait_max = total_tries;
698 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
699 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
700 #if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
701 /* For ARM there is no pause in cpu_relax, so add extra delay */
702 OSL_DELAY(delay * step);
703 #endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
704 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
705 } /* for number of steps */
707 dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
709 ring->seqnum++; /* skip this message ... leak of a pktid */
710 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
714 prot->d2h_sync_wait_tot += total_tries;
715 return msg->msg_type;
719 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
720 * mode. The xorcsum is placed in the last word of a message. Dongle will also
721 * place a seqnum in the epoch field of the cmn_msg_hdr.
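/*
 * Property relied upon below: the dongle writes the last word of each work
 * item such that a 32bit XOR over the entire item, including that word, is
 * zero. bcm_compute_xor32() returning 0 therefore signals a fully landed DMA,
 * after which the epoch byte is checked against the expected ring seqnum.
 */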
723 static uint8 BCMFASTPATH
724 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
725 volatile cmn_msg_hdr_t *msg, int msglen)
728 uint32 prot_checksum = 0; /* computed checksum */
729 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
730 uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
731 dhd_prot_t *prot = dhd->prot;
733 uint32 delay = PCIE_D2H_SYNC_DELAY;
734 uint32 total_tries = 0;
736 ASSERT(msglen == ring->item_len);
738 BCM_REFERENCE(delay);
741 * For retries we use a stepper algorithm.
742 * We see that every time the dongle comes out of the D3
743 * cold state, the first D2H mem2mem DMA takes more time to
744 * complete, leading to livelock issues.
746 * Case 1 - Apart from the host CPU, some other bus master is
747 * accessing the DDR port, probably a page close to the ring,
748 * so PCIE does not get a chance to update the memory.
749 * Solution - Increase the number of tries.
751 * Case 2 - The 50usec delay given by the host CPU is not
752 * sufficient for the PCIe RC to start its work.
753 * In this case the breathing time of 50usec given by
754 * the host CPU is not sufficient.
755 * Solution: Increase the delay in a stepper fashion.
756 * This is done to ensure that no
757 * unwanted extra delay is introduced in normal conditions.
759 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
760 for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
761 prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
762 if (prot_checksum == 0U) { /* checksum is OK */
763 if (msg->epoch == ring_seqnum) {
764 ring->seqnum++; /* next expected sequence number */
769 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
771 if (total_tries > prot->d2h_sync_wait_max)
772 prot->d2h_sync_wait_max = total_tries;
774 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
775 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
776 #if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
777 /* For ARM there is no pause in cpu_relax, so add extra delay */
778 OSL_DELAY(delay * step);
779 #endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
781 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
782 } /* for number of steps */
784 dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
786 ring->seqnum++; /* skip this message ... leak of a pktid */
787 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
791 prot->d2h_sync_wait_tot += total_tries;
792 return msg->msg_type;
796 * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the host
797 * need not try to sync. This noop sync handler will be bound when the dongle
798 * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
800 static uint8 BCMFASTPATH
801 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
802 volatile cmn_msg_hdr_t *msg, int msglen)
804 return msg->msg_type;
808 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
809 * the dongle advertises.
812 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
814 dhd_prot_t *prot = dhd->prot;
815 prot->d2h_sync_wait_max = 0UL;
816 prot->d2h_sync_wait_tot = 0UL;
818 prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
819 prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
820 prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
822 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
823 prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
824 } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
825 prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
827 prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
831 #endif /* PCIE_D2H_SYNC */
834 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
836 /* To synchronize with the previous memory operations, call wmb() */
838 dhd->prot->ioctl_received = reason;
839 /* Call another wmb() to make sure the event value update is visible before waking the waiter */
841 dhd_os_ioctl_resp_wake(dhd);
846 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
849 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
851 dhd_prot_t *prot = dhd->prot;
852 prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
853 prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
856 /* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
860 * +---------------------------------------------------------------------------+
861 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
862 * virtual and physical addresses, the buffer length and the DMA handler.
863 * A secdma handler is also included in the dhd_dma_buf object.
864 * +---------------------------------------------------------------------------+
868 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
870 base_addr->low_addr = htol32(PHYSADDRLO(pa));
871 base_addr->high_addr = htol32(PHYSADDRHI(pa));
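/*
 * Usage sketch: a ring's DMA-able buffer base travels to the dongle in this
 * little-endian low/high split form, e.g. when publishing ring parameters:
 *
 *   dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
 */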
876 * dhd_dma_buf_audit - Perform sanity checks on a DHD DMA buffer.
879 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
881 uint32 base, end; /* dongle uses 32bit ptr arithmetic */
884 base = PHYSADDRLO(dma_buf->pa);
886 ASSERT(ISALIGNED(base, DMA_ALIGN_LEN));
887 ASSERT(dma_buf->len != 0);
889 /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
890 end = (base + dma_buf->len); /* end address */
892 if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */
893 DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
894 __FUNCTION__, base, dma_buf->len));
902 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
903 * returns BCME_OK=0 on success
904 * returns non-zero negative error value on failure.
907 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
910 osl_t *osh = dhd->osh;
912 ASSERT(dma_buf != NULL);
913 ASSERT(dma_buf->va == NULL);
914 ASSERT(dma_buf->len == 0);
916 /* Pad the buffer length by one extra cacheline size.
917 * Required for D2H direction.
919 dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
920 dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
921 DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
923 if (dma_buf->va == NULL) {
924 DHD_ERROR(("%s: buf_len %d, no memory available\n",
925 __FUNCTION__, buf_len));
929 dma_buf->len = buf_len; /* not including padded len */
931 if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
932 dhd_dma_buf_free(dhd, dma_buf);
936 dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
942 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
945 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
947 if ((dma_buf == NULL) || (dma_buf->va == NULL)) {
951 (void)dhd_dma_buf_audit(dhd, dma_buf);
953 /* Zero out the entire buffer and cache flush */
954 memset((void*)dma_buf->va, 0, dma_buf->len);
955 OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
959 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
960 * dhd_dma_buf_alloc().
963 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
965 osl_t *osh = dhd->osh;
969 if (dma_buf->va == NULL) {
970 return; /* Allow for free invocation, when alloc failed */
973 /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
974 (void)dhd_dma_buf_audit(dhd, dma_buf);
976 /* dma buffer may have been padded at allocation */
977 DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
978 dma_buf->pa, dma_buf->dmah);
980 memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
984 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
985 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
988 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
989 void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
991 dhd_dma_buf_t *dma_buf;
993 dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
997 dma_buf->dmah = dmah;
998 dma_buf->secdma = secdma;
1000 /* Audit user defined configuration */
1001 (void)dhd_dma_buf_audit(dhd, dma_buf);
1004 /* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
1007 * +---------------------------------------------------------------------------+
1008 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1009 * Its main purpose is to save memory on the dongle, but it serves other purposes as well.
1010 * The packet id map, also includes storage for some packet parameters that
1011 * may be saved. A native packet pointer along with the parameters may be saved
1012 * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1013 * and the metadata may be retrieved using the previously allocated packet id.
1014 * +---------------------------------------------------------------------------+
1016 #define DHD_PCIE_PKTID
1017 #define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */
1019 /* On Router, the pktptr serves as a pktid. */
1022 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1023 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1026 /* Enum for marking the buffer color based on usage */
1027 typedef enum dhd_pkttype {
1028 PKTTYPE_DATA_TX = 0,
1032 /* dhd_prot_pkt_free: no check for the case where a pktid is reserved but no space is available */
1036 #define DHD_PKTID_INVALID (0U)
1037 #define DHD_IOCTL_REQ_PKTID (0xFFFE)
1038 #define DHD_FAKE_PKTID (0xFACE)
1040 #define DHD_PKTID_FREE_LOCKER (FALSE)
1041 #define DHD_PKTID_RSV_LOCKER (TRUE)
1043 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1045 /* Construct a packet id mapping table, returning an opaque map handle */
1046 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index);
1048 /* Destroy a packet id mapping table, freeing all packets active in the table */
1049 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1051 #define PKTID_MAP_HANDLE (0)
1052 #define PKTID_MAP_HANDLE_IOCTL (1)
1054 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index))
1055 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
1057 #if defined(DHD_PCIE_PKTID)
1060 /* Determine number of pktids that are available */
1061 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1063 /* Allocate a unique pktid against which a pkt and some metadata is saved */
1064 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1066 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1067 void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1068 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1069 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1070 void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1071 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1073 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
1074 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1075 uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1076 void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1079 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1081 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1082 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1084 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1085 * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1087 #if defined(DHD_PKTID_AUDIT_ENABLED)
1088 #define USE_DHD_PKTID_AUDIT_LOCK 1
1089 /* Audit the pktidmap allocator */
1090 /* #define DHD_PKTID_AUDIT_MAP */
1092 /* Audit the pktid during production/consumption of workitems */
1093 #define DHD_PKTID_AUDIT_RING
1095 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1096 #error "Only one of DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be enabled at a time."
1097 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1099 #define DHD_DUPLICATE_ALLOC 1
1100 #define DHD_DUPLICATE_FREE 2
1101 #define DHD_TEST_IS_ALLOC 3
1102 #define DHD_TEST_IS_FREE 4
1104 #ifdef USE_DHD_PKTID_AUDIT_LOCK
1105 #define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1106 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1107 #define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
1108 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1110 #define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
1111 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
1112 #define DHD_PKTID_AUDIT_LOCK(lock) 0
1113 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
1114 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1116 #endif /* DHD_PKTID_AUDIT_ENABLED */
1118 /* #define USE_DHD_PKTID_LOCK 1 */
1120 #ifdef USE_DHD_PKTID_LOCK
1121 #define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1122 #define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1123 #define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
1124 #define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1126 #define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
1127 #define DHD_PKTID_LOCK_DEINIT(osh, lock) \
1129 BCM_REFERENCE(osh); \
1130 BCM_REFERENCE(lock); \
1132 #define DHD_PKTID_LOCK(lock) 0
1133 #define DHD_PKTID_UNLOCK(lock, flags) \
1135 BCM_REFERENCE(lock); \
1136 BCM_REFERENCE(flags); \
1138 #endif /* !USE_DHD_PKTID_LOCK */
1140 /* Packet metadata saved in packet id mapper */
1142 /* A locker can be in one of 3 states:
1143 * LOCKER_IS_FREE - Locker is free and can be allocated
1144 * LOCKER_IS_BUSY - Locker is assigned and is being used; the values in the
1145 * locker (buffer address, len, phy addr etc.) are populated
1147 * LOCKER_IS_RSVD - The locker is reserved for future use, but the values
1148 * in the locker are not valid. In particular, pkt must be
1149 * NULL in this state. When the user wants to re-use the
1150 * locker, dhd_pktid_map_free can be called with a flag
1151 * to reserve the pktid for future use, which will clear
1152 * the contents of the locker. When the user then calls
1153 * dhd_pktid_map_save, the locker moves to LOCKER_IS_BUSY.
1155 typedef enum dhd_locker_state {
1159 } dhd_locker_state_t;
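/*
 * Summary of the transitions described above:
 *
 *   LOCKER_IS_FREE --reserve/alloc--------------------> LOCKER_IS_BUSY
 *   LOCKER_IS_BUSY --free(DHD_PKTID_FREE_LOCKER)------> LOCKER_IS_FREE
 *   LOCKER_IS_BUSY --free(DHD_PKTID_RSV_LOCKER)-------> LOCKER_IS_RSVD
 *   LOCKER_IS_RSVD --dhd_pktid_map_save()-------------> LOCKER_IS_BUSY
 */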
1161 typedef struct dhd_pktid_item {
1162 dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
1163 uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
1164 dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1165 uint16 len; /* length of mapped packet's buffer */
1166 void *pkt; /* opaque native pointer to a packet */
1167 dmaaddr_t pa; /* physical address of mapped packet's buffer */
1168 void *dmah; /* handle to OS specific DMA map */
1172 typedef struct dhd_pktid_map {
1173 uint32 items; /* total items in map */
1174 uint32 avail; /* total available items */
1175 int failures; /* lockers unavailable count */
1176 /* Spinlock to protect dhd_pktid_map in process/tasklet context */
1177 void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
1179 #if defined(DHD_PKTID_AUDIT_ENABLED)
1180 void *pktid_audit_lock;
1181 struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1182 #endif /* DHD_PKTID_AUDIT_ENABLED */
1184 uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
1185 dhd_pktid_item_t lockers[0]; /* metadata storage */
1189 * PktId (Locker) #0 is never allocated and is considered invalid.
1191 * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1192 * depleted pktid pool and must not be used by the caller.
1194 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1197 #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
1198 #define DHD_PKIDMAP_ITEMS(items) (items)
1199 #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
1200 (DHD_PKTID_ITEM_SZ * ((items) + 1)))
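/*
 * Worked example: with MAX_PKTID_ITEMS = 3072 the map is sized for lockers
 * [0..3072], locker #0 being the reserved DHD_PKTID_INVALID slot:
 *
 *   DHD_PKTID_MAP_SZ(3072) == sizeof(dhd_pktid_map_t) +
 *                             (sizeof(dhd_pktid_item_t) * 3073)
 */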
1202 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map))
1204 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1205 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt))
1207 /* Reuse a previously reserved locker to save packet params */
1208 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1209 dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1210 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1211 (dhd_pkttype_t)(pkttype))
1213 /* Convert a packet to a pktid, and save packet params in locker */
1214 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1215 dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1216 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1217 (dhd_pkttype_t)(pkttype))
1219 /* Convert pktid to a packet, and free the locker */
1220 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1221 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1222 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1223 (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1225 /* Convert the pktid to a packet, empty locker, but keep it reserved */
1226 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1227 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1228 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1229 (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1231 #define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
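/*
 * Round-trip sketch (hedged; DMA_TX is the OSL direction flag): a transmit
 * path maps a packet's buffer for DMA, converts the native pointer into the
 * 32bit pktid carried in the work item, and the completion path converts it
 * back, unmapping and freeing via the saved metadata.
 *
 *   pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX, dmah,
 *                               secdma, PKTTYPE_DATA_TX);
 *   if (pktid == DHD_PKTID_INVALID)
 *           ... pool depleted; caller must back off ...
 *   ...
 *   pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
 *                             PKTTYPE_DATA_TX);
 */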
1233 #if defined(DHD_PKTID_AUDIT_ENABLED)
1235 static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1236 const int test_for, const char *errmsg);
1239 * dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1242 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1243 const int test_for, const char *errmsg)
1245 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1247 const uint32 max_pktid_items = (MAX_PKTID_ITEMS);
1248 struct bcm_mwbmap *handle;
1252 if (pktid_map == (dhd_pktid_map_t *)NULL) {
1253 DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1257 flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1259 handle = pktid_map->pktid_audit;
1260 if (handle == (struct bcm_mwbmap *)NULL) {
1261 DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1262 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1266 /* Exclude special pktids from audit */
1267 ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1269 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1273 if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) {
1274 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1275 /* lock is released in "error" */
1281 case DHD_DUPLICATE_ALLOC:
1282 if (!bcm_mwbmap_isfree(handle, pktid)) {
1283 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1287 bcm_mwbmap_force(handle, pktid);
1290 case DHD_DUPLICATE_FREE:
1291 if (bcm_mwbmap_isfree(handle, pktid)) {
1292 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1296 bcm_mwbmap_free(handle, pktid);
1299 case DHD_TEST_IS_ALLOC:
1300 if (bcm_mwbmap_isfree(handle, pktid)) {
1301 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1307 case DHD_TEST_IS_FREE:
1308 if (!bcm_mwbmap_isfree(handle, pktid)) {
1309 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
1319 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1324 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1325 /* May insert any trap mechanism here ! */
1326 dhd_pktid_audit_fail_cb(dhd);
1331 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
1332 dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
1334 #endif /* DHD_PKTID_AUDIT_ENABLED */
1336 /* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */
1340 * +---------------------------------------------------------------------------+
1341 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
1343 * dhd_pktid_map manages a set of unique Packet Ids in the range [1..MAX_PKTID_ITEMS].
1345 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
1346 * packet id is returned. This unique packet id may be used to retrieve the
1347 * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
1348 * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
1349 * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
1351 * Implementation Note:
1352 * Convert this into a <key,locker> abstraction and place into bcmutils !
1353 * Locker abstraction should treat contents as opaque storage, and a
1354 * callback should be registered to handle busy lockers on destructor.
1356 * +---------------------------------------------------------------------------+
1359 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
1361 static dhd_pktid_map_handle_t *
1362 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
1366 dhd_pktid_map_t *map;
1367 uint32 dhd_pktid_map_sz;
1369 #ifdef DHD_USE_STATIC_PKTIDMAP
1371 #endif /* DHD_USE_STATIC_PKTIDMAP */
1374 ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
1375 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
1377 #ifdef DHD_USE_STATIC_PKTIDMAP
1378 if (index == PKTID_MAP_HANDLE) {
1379 section = DHD_PREALLOC_PKTID_MAP;
1381 section = DHD_PREALLOC_PKTID_MAP_IOCTL;
1384 map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz);
1386 map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz);
1387 #endif /* DHD_USE_STATIC_PKTIDMAP */
1390 DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
1391 __FUNCTION__, __LINE__, dhd_pktid_map_sz));
1395 bzero(map, dhd_pktid_map_sz);
1397 /* Initialize the lock that protects this structure */
1398 map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
1399 if (map->pktid_lock == NULL) {
1400 DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
1404 map->items = num_items;
1405 map->avail = num_items;
1407 map_items = DHD_PKIDMAP_ITEMS(map->items);
1409 #if defined(DHD_PKTID_AUDIT_ENABLED)
1410 /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
1411 map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
1412 if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
1413 DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
1416 DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
1417 __FUNCTION__, __LINE__, map_items + 1));
1420 map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
1422 #endif /* DHD_PKTID_AUDIT_ENABLED */
1424 for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
1425 map->keys[nkey] = nkey; /* populate with unique keys */
1426 map->lockers[nkey].state = LOCKER_IS_FREE;
1427 map->lockers[nkey].pkt = NULL; /* bzero: redundant */
1428 map->lockers[nkey].len = 0;
1431 /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */
1432 map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY;
1433 map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
1434 map->lockers[DHD_PKTID_INVALID].len = 0;
1436 #if defined(DHD_PKTID_AUDIT_ENABLED)
1437 /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
1438 bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
1439 #endif /* DHD_PKTID_AUDIT_ENABLED */
1441 return (dhd_pktid_map_handle_t *)map; /* opaque handle */
1447 #if defined(DHD_PKTID_AUDIT_ENABLED)
1448 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1449 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1450 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1451 if (map->pktid_audit_lock)
1452 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1454 #endif /* DHD_PKTID_AUDIT_ENABLED */
1456 if (map->pktid_lock)
1457 DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1459 MFREE(osh, map, dhd_pktid_map_sz);
1462 return (dhd_pktid_map_handle_t *)NULL;
1466 * Retrieve all allocated keys and free all <numbered_key, locker>.
1467 * Freeing implies: unmapping the buffers and freeing the native packets.
1468 * This could have been a callback registered with the pktid mapper.
1472 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1476 dhd_pktid_map_t *map;
1477 uint32 dhd_pktid_map_sz;
1478 dhd_pktid_item_t *locker;
1482 if (handle == NULL) {
1486 map = (dhd_pktid_map_t *)handle;
1487 flags = DHD_PKTID_LOCK(map->pktid_lock);
1490 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1492 nkey = 1; /* skip reserved KEY #0, and start from 1 */
1493 locker = &map->lockers[nkey];
1495 map_items = DHD_PKIDMAP_ITEMS(map->items);
1497 for (; nkey <= map_items; nkey++, locker++) {
1499 if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
1501 locker->state = LOCKER_IS_FREE; /* force open the locker */
1503 #if defined(DHD_PKTID_AUDIT_ENABLED)
1504 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1505 #endif /* DHD_PKTID_AUDIT_ENABLED */
1507 { /* This could be a callback registered with dhd_pktid_map */
1508 DMA_UNMAP(osh, locker->pa, locker->len,
1509 locker->dir, 0, DHD_DMAH_NULL);
1510 dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
1511 locker->pkttype, TRUE);
1514 #if defined(DHD_PKTID_AUDIT_ENABLED)
1516 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1518 #endif /* DHD_PKTID_AUDIT_ENABLED */
1520 locker->pkt = NULL; /* clear saved pkt */
1524 #if defined(DHD_PKTID_AUDIT_ENABLED)
1525 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1526 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1527 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1528 if (map->pktid_audit_lock) {
1529 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1532 #endif /* DHD_PKTID_AUDIT_ENABLED */
1534 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1535 DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1537 #ifdef DHD_USE_STATIC_PKTIDMAP
1538 DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
1540 MFREE(osh, handle, dhd_pktid_map_sz);
1541 #endif /* DHD_USE_STATIC_PKTIDMAP */
1544 #ifdef IOCTLRESP_USE_CONSTMEM
1545 /** Called in detach scenario. Releasing IOCTL buffers. */
1547 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1550 dhd_pktid_map_t *map;
1551 uint32 dhd_pktid_map_sz;
1552 dhd_pktid_item_t *locker;
1555 osl_t *osh = dhd->osh;
1557 if (handle == NULL) {
1561 map = (dhd_pktid_map_t *)handle;
1562 flags = DHD_PKTID_LOCK(map->pktid_lock);
1564 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1566 nkey = 1; /* skip reserved KEY #0, and start from 1 */
1567 locker = &map->lockers[nkey];
1569 map_items = DHD_PKIDMAP_ITEMS(map->items);
1571 for (; nkey <= map_items; nkey++, locker++) {
1573 if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
1575 locker->state = LOCKER_IS_FREE; /* force open the locker */
1577 #if defined(DHD_PKTID_AUDIT_ENABLED)
1578 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1579 #endif /* DHD_PKTID_AUDIT_ENABLED */
1582 dhd_dma_buf_t retbuf;
1583 retbuf.va = locker->pkt;
1584 retbuf.len = locker->len;
1585 retbuf.pa = locker->pa;
1586 retbuf.dmah = locker->dmah;
1587 retbuf.secdma = locker->secdma;
1589 /* This could be a callback registered with dhd_pktid_map */
1590 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1591 free_ioctl_return_buffer(dhd, &retbuf);
1592 flags = DHD_PKTID_LOCK(map->pktid_lock);
1595 #if defined(DHD_PKTID_AUDIT_ENABLED)
1597 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1599 #endif /* DHD_PKTID_AUDIT_ENABLED */
1601 locker->pkt = NULL; /* clear saved pkt */
1605 #if defined(DHD_PKTID_AUDIT_ENABLED)
1606 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1607 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1608 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1609 if (map->pktid_audit_lock) {
1610 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1613 #endif /* DHD_PKTID_AUDIT_ENABLED */
1615 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1616 DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1618 #ifdef DHD_USE_STATIC_PKTIDMAP
1619 DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
1621 MFREE(osh, handle, dhd_pktid_map_sz);
1622 #endif /* DHD_USE_STATIC_PKTIDMAP */
1624 #endif /* IOCTLRESP_USE_CONSTMEM */
1626 /** Get the pktid free count */
1627 static INLINE uint32 BCMFASTPATH
1628 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
1630 dhd_pktid_map_t *map;
1634 ASSERT(handle != NULL);
1635 map = (dhd_pktid_map_t *)handle;
1637 flags = DHD_PKTID_LOCK(map->pktid_lock);
1639 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1645 * Allocate locker, save pkt contents, and return the locker's numbered key.
1646 * dhd_pktid_map_alloc() is not reentrant; serialization is the caller's responsibility.
1647 * Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
1648 * implying a depleted pool of pktids.
1651 static INLINE uint32
1652 __dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
1655 dhd_pktid_map_t *map;
1656 dhd_pktid_item_t *locker;
1658 ASSERT(handle != NULL);
1659 map = (dhd_pktid_map_t *)handle;
1661 if (map->avail <= 0) { /* no more pktids to allocate */
1663 DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
1664 return DHD_PKTID_INVALID; /* failed alloc request */
1667 ASSERT(map->avail <= map->items);
1668 nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
1669 locker = &map->lockers[nkey]; /* save packet metadata in locker */
1671 locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
1673 locker->state = LOCKER_IS_BUSY; /* reserve this locker */
1675 #if defined(DHD_PKTID_AUDIT_MAP)
1676 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
1677 #endif /* DHD_PKTID_AUDIT_MAP */
1679 ASSERT(nkey != DHD_PKTID_INVALID);
1680 return nkey; /* return locker's numbered key */
1685 * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
1686 * yet populated. Invoke the pktid save api to populate the packet parameters
1687 * into the locker.
1688 * Wrapper that takes the required lock when called directly.
1690 static INLINE uint32
1691 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
1693 dhd_pktid_map_t *map;
1697 ASSERT(handle != NULL);
1698 map = (dhd_pktid_map_t *)handle;
1699 flags = DHD_PKTID_LOCK(map->pktid_lock);
1700 ret = __dhd_pktid_map_reserve(dhd, handle, pkt);
1701 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1707 __dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1708 uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1709 dhd_pkttype_t pkttype)
1711 dhd_pktid_map_t *map;
1712 dhd_pktid_item_t *locker;
1714 ASSERT(handle != NULL);
1715 map = (dhd_pktid_map_t *)handle;
1717 ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
1719 locker = &map->lockers[nkey];
1721 ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
1722 ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
1724 #if defined(DHD_PKTID_AUDIT_MAP)
1725 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
1726 #endif /* DHD_PKTID_AUDIT_MAP */
1728 /* store contents in locker */
1731 locker->len = (uint16)len; /* 16bit len */
1732 locker->dmah = dmah; /* dma handle */
1733 locker->secdma = secdma;
1734 locker->pkttype = pkttype;
1736 locker->state = LOCKER_IS_BUSY; /* make this locker busy */
1740 * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding
1741 * to a previously reserved unique numbered key.
1742 * Wrapper that takes the required lock when called directly.
1745 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1746 uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1747 dhd_pkttype_t pkttype)
1749 dhd_pktid_map_t *map;
1752 ASSERT(handle != NULL);
1753 map = (dhd_pktid_map_t *)handle;
1754 flags = DHD_PKTID_LOCK(map->pktid_lock);
1755 __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
1756 dir, dmah, secdma, pkttype);
1757 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1761 * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
1762 * contents into the corresponding locker. Return the numbered key.
1764 static uint32 BCMFASTPATH
1765 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1766 dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1767 dhd_pkttype_t pkttype)
1771 dhd_pktid_map_t *map;
1773 ASSERT(handle != NULL);
1774 map = (dhd_pktid_map_t *)handle;
1776 flags = DHD_PKTID_LOCK(map->pktid_lock);
1778 nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
1779 if (nkey != DHD_PKTID_INVALID) {
1780 __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
1781 len, dir, dmah, secdma, pkttype);
1782 #if defined(DHD_PKTID_AUDIT_MAP)
1783 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
1784 #endif /* DHD_PKTID_AUDIT_MAP */
1787 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1793 * dhd_pktid_map_free - Given a numbered key, return the locker contents.
1794 * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
1795 * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
1796 * value. Only a previously allocated pktid may be freed.
1798 static void * BCMFASTPATH
1799 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
1800 dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
1801 dhd_pkttype_t pkttype, bool rsv_locker)
1803 dhd_pktid_map_t *map;
1804 dhd_pktid_item_t *locker;
1808 ASSERT(handle != NULL);
1810 map = (dhd_pktid_map_t *)handle;
1812 flags = DHD_PKTID_LOCK(map->pktid_lock);
1814 ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
1816 locker = &map->lockers[nkey];
1818 #if defined(DHD_PKTID_AUDIT_MAP)
1819 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
1820 #endif /* DHD_PKTID_AUDIT_MAP */
1822 if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
1823 DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
1824 __FUNCTION__, __LINE__, nkey));
1825 ASSERT(locker->state != LOCKER_IS_FREE);
1827 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1831 /* Check the colour of the buffer, i.e. a buffer posted for TX should be
1832 * freed on TX completion. Similarly, a buffer posted for IOCTL should be
1833 * freed on IOCTL completion, etc.
1835 if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
1837 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1839 DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
1840 __FUNCTION__, __LINE__, nkey));
1841 ASSERT(locker->pkttype == pkttype);
1846 if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
1848 map->keys[map->avail] = nkey; /* make this numbered key available */
1849 locker->state = LOCKER_IS_FREE; /* open and free Locker */
1851 /* pktid will be reused, but the locker does not have a valid pkt */
1852 locker->state = LOCKER_IS_RSVD;
1855 #if defined(DHD_PKTID_AUDIT_MAP)
1856 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1857 #endif /* DHD_PKTID_AUDIT_MAP */
1859 *pa = locker->pa; /* return contents of locker */
1860 *len = (uint32)locker->len;
1861 *dmah = locker->dmah;
1862 *secdma = locker->secdma;
1865 locker->pkt = NULL; /* Clear pkt */
1868 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
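
/*
 * Illustrative sketch (not part of the original source): the typical caller
 * flow for the pktid mapper above. A pktid is allocated when a host buffer is
 * posted to the dongle and freed when the matching completion work item
 * arrives. The guard macro below is hypothetical and never defined; DMA_TX is
 * assumed to be the osl DMA direction constant for transmit.
 */
#ifdef DHD_PKTID_MAP_USAGE_SKETCH
static int
dhd_pktid_map_usage_sketch(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
	void *pkt, dmaaddr_t pa, uint32 len)
{
	uint32 nkey;
	dmaaddr_t ret_pa;
	uint32 ret_len;
	void *dmah, *secdma, *ret_pkt;

	/* Post path: reserve a locker and save the packet's dma params in it */
	nkey = dhd_pktid_map_alloc(dhd, handle, pkt, pa, len, DMA_TX,
		NULL, NULL, PKTTYPE_DATA_TX);
	if (nkey == DHD_PKTID_INVALID)
		return BCME_NORESOURCE; /* pktid pool depleted */

	/* Completion path: retrieve locker contents and release the pktid */
	ret_pkt = dhd_pktid_map_free(dhd, handle, nkey, &ret_pa, &ret_len,
		&dmah, &secdma, PKTTYPE_DATA_TX, DHD_PKTID_FREE_LOCKER);
	ASSERT(ret_pkt == pkt);

	return BCME_OK;
}
#endif /* DHD_PKTID_MAP_USAGE_SKETCH */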
1872 #else /* ! DHD_PCIE_PKTID */
1875 typedef struct pktlist {
1876 PKT_LIST *tx_pkt_list; /* list for tx packets */
1877 PKT_LIST *rx_pkt_list; /* list for rx packets */
1878 PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
1882 * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
1883 * of a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
1885 * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
1886 * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected
1887 *   by a lock.
1888 * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
1890 #define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
1891 #define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
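
/*
 * Illustrative note (not part of the original source): on such hosts the
 * mapping is the identity, so the round trip below holds and no per-pktid
 * state needs to be maintained:
 *
 *   DHD_PKTPTR32(DHD_PKTID32(pkt)) == pkt
 */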
1894 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
1895 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
1896 dhd_pkttype_t pkttype);
1897 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
1898 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
1899 dhd_pkttype_t pkttype);
1901 static dhd_pktid_map_handle_t *
1902 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
1904 osl_t *osh = dhd->osh;
1905 pktlists_t *handle = NULL;
1907 if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
1908 DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
1909 __FUNCTION__, __LINE__, sizeof(pktlists_t)));
1913 if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
1914 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
1915 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
1919 if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
1920 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
1921 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
1925 if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
1926 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
1927 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
1931 PKTLIST_INIT(handle->tx_pkt_list);
1932 PKTLIST_INIT(handle->rx_pkt_list);
1933 PKTLIST_INIT(handle->ctrl_pkt_list);
1935 return (dhd_pktid_map_handle_t *) handle;
1938 if (handle->ctrl_pkt_list) {
1939 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
1942 if (handle->rx_pkt_list) {
1943 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
1946 if (handle->tx_pkt_list) {
1947 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
1951 MFREE(osh, handle, sizeof(pktlists_t));
1955 return (dhd_pktid_map_handle_t *)NULL;
1959 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
1961 osl_t *osh = dhd->osh;
1962 pktlists_t *handle = (pktlists_t *) map;
1964 ASSERT(handle != NULL);
1965 if (handle == (pktlists_t *)NULL) {
1969 if (handle->ctrl_pkt_list) {
1970 PKTLIST_FINI(handle->ctrl_pkt_list);
1971 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
1974 if (handle->rx_pkt_list) {
1975 PKTLIST_FINI(handle->rx_pkt_list);
1976 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
1979 if (handle->tx_pkt_list) {
1980 PKTLIST_FINI(handle->tx_pkt_list);
1981 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
1985 MFREE(osh, handle, sizeof(pktlists_t));
1989 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
1990 static INLINE uint32
1991 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
1992 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
1993 dhd_pkttype_t pkttype)
1995 pktlists_t *handle = (pktlists_t *) map;
1996 ASSERT(pktptr32 != NULL);
1997 DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
1998 DHD_PKT_SET_DMAH(pktptr32, dmah);
1999 DHD_PKT_SET_PA(pktptr32, pa);
2000 DHD_PKT_SET_SECDMA(pktptr32, secdma);
2002 if (pkttype == PKTTYPE_DATA_TX) {
2003 PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
2004 } else if (pkttype == PKTTYPE_DATA_RX) {
2005 PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
2007 PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
2010 return DHD_PKTID32(pktptr32);
2013 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2014 static INLINE void *
2015 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2016 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2017 dhd_pkttype_t pkttype)
2019 pktlists_t *handle = (pktlists_t *) map;
2022 ASSERT(pktid32 != 0U);
2023 pktptr32 = DHD_PKTPTR32(pktid32);
2024 *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2025 *dmah = DHD_PKT_GET_DMAH(pktptr32);
2026 *pa = DHD_PKT_GET_PA(pktptr32);
2027 *secdma = DHD_PKT_GET_SECDMA(pktptr32);
2029 if (pkttype == PKTTYPE_DATA_TX) {
2030 PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
2031 } else if (pkttype == PKTTYPE_DATA_RX) {
2032 PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
2034 PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
2040 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt)
2042 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2043 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2044 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2045 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2048 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2049 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2050 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2051 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2054 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2055 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
2056 dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2057 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2058 (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2061 #define DHD_PKTID_AVAIL(map) (~0)
2063 #endif /* ! DHD_PCIE_PKTID */
2065 /* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
2069 * The PCIE FD protocol layer is constructed in two phases:
2070 * Phase 1. dhd_prot_attach()
2071 * Phase 2. dhd_prot_init()
2073 * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2074 * All Common rings are also attached (msgbuf_ring_t objects are allocated
2075 * with DMA-able buffers).
2076 * All dhd_dma_buf_t objects are also allocated here.
2078 * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2079 * initialization of objects that requires information advertized by the dongle
2080 * may not be performed here.
2081 * E.g. the number of TxPost flowrings is not known at this point, nor do
2082 * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
2083 * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2084 * rings (common + flow).
2086 * dhd_prot_init() is invoked after the bus layer has fetched the information
2087 * advertized by the dongle in the pcie_shared_t.
2090 dhd_prot_attach(dhd_pub_t *dhd)
2092 osl_t *osh = dhd->osh;
2095 /* Allocate prot structure */
2096 if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2097 sizeof(dhd_prot_t)))) {
2098 DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2101 memset(prot, 0, sizeof(*prot));
2106 /* Is DMAing of ring WR/RD index updates supported? FALSE by default */
2107 dhd->dma_d2h_ring_upd_support = FALSE;
2108 dhd->dma_h2d_ring_upd_support = FALSE;
2110 /* Common Ring Allocations */
2112 /* Ring 0: H2D Control Submission */
2113 if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2114 H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2115 BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2116 DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2121 /* Ring 1: H2D Receive Buffer Post */
2122 if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2123 H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2124 BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2125 DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2130 /* Ring 2: D2H Control Completion */
2131 if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2132 D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2133 BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2134 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2139 /* Ring 3: D2H Transmit Complete */
2140 if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2141 D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2142 BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2143 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2149 /* Ring 4: D2H Receive Complete */
2150 if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2151 D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2152 BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2153 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2160 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2161 * buffers for flowrings will be instantiated in dhd_prot_init().
2162 * See dhd_prot_flowrings_pool_attach()
2164 /* ioctl response buffer */
2165 if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2169 /* IOCTL request buffer */
2170 if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2174 /* Scratch buffer for dma rx offset */
2175 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
2179 /* scratch buffer bus throughput measurement */
2180 if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2184 #ifdef DHD_RX_CHAINING
2185 dhd_rxchain_reset(&prot->rxchain);
2190 /* Initialize the work queues to be used by the Load Balancing logic */
2191 #if defined(DHD_LB_TXC)
2194 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2195 bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
2196 buffer, DHD_LB_WORKQ_SZ);
2197 prot->tx_compl_prod_sync = 0;
2198 DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
2199 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2201 #endif /* DHD_LB_TXC */
2203 #if defined(DHD_LB_RXC)
2206 buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
2207 bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
2208 buffer, DHD_LB_WORKQ_SZ);
2209 prot->rx_compl_prod_sync = 0;
2210 DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
2211 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2213 #endif /* DHD_LB_RXC */
2221 #ifndef CONFIG_DHD_USE_STATIC_BUF
2223 dhd_prot_detach(dhd);
2225 #endif /* CONFIG_DHD_USE_STATIC_BUF */
2228 } /* dhd_prot_attach */
2232 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
2233 * completed its initialization of the pcie_shared structure, we may now fetch
2234 * the dongle advertized features and adjust the protocol layer accordingly.
2236 * dhd_prot_init() may be invoked again after a dhd_prot_reset().
2239 dhd_prot_init(dhd_pub_t *dhd)
2241 sh_addr_t base_addr;
2242 dhd_prot_t *prot = dhd->prot;
2244 /* PKTID handle INIT */
2245 if (prot->pktid_map_handle != NULL) {
2246 DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__));
2251 #ifdef IOCTLRESP_USE_CONSTMEM
2252 if (prot->pktid_map_handle_ioctl != NULL) {
2253 DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__));
2257 #endif /* IOCTLRESP_USE_CONSTMEM */
2259 prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
2260 if (prot->pktid_map_handle == NULL) {
2261 DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__));
2266 #ifdef IOCTLRESP_USE_CONSTMEM
2267 prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2268 DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL);
2269 if (prot->pktid_map_handle_ioctl == NULL) {
2270 DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__));
2274 #endif /* IOCTLRESP_USE_CONSTMEM */
2276 /* Max pkts in ring */
2277 prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
2279 DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
2281 /* Read max rx packets supported by dongle */
2282 dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
2283 if (prot->max_rxbufpost == 0) {
2284 /* This would happen if the dongle firmware is not */
2285 /* using the latest shared structure template */
2286 prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
2288 DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
2290 /* Initialize fields individually; a bzero() would blow away the dma pointers. */
2291 prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
2292 prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
2294 prot->cur_ioctlresp_bufs_posted = 0;
2295 prot->active_tx_count = 0;
2296 prot->data_seq_no = 0;
2297 prot->ioctl_seq_no = 0;
2298 prot->rxbufpost = 0;
2299 prot->cur_event_bufs_posted = 0;
2300 prot->ioctl_state = 0;
2301 prot->curr_ioctl_cmd = 0;
2302 prot->ioctl_received = IOCTL_WAIT;
2304 prot->dmaxfer.srcmem.va = NULL;
2305 prot->dmaxfer.dstmem.va = NULL;
2306 prot->dmaxfer.in_progress = FALSE;
2308 prot->metadata_dbg = FALSE;
2309 prot->rx_metadata_offset = 0;
2310 prot->tx_metadata_offset = 0;
2311 prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
2313 prot->ioctl_trans_id = 0;
2315 /* Register the interrupt function upfront */
2316 /* remove corerev checks in data path */
2317 prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
2319 /* Initialize Common MsgBuf Rings */
2321 dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
2322 dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
2323 dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
2324 dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
2325 dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
2327 #if defined(PCIE_D2H_SYNC)
2328 dhd_prot_d2h_sync_init(dhd);
2329 #endif /* PCIE_D2H_SYNC */
2331 dhd_prot_h2d_sync_init(dhd);
2333 /* init the scratch buffer */
2334 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
2335 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2336 D2H_DMA_SCRATCH_BUF, 0);
2337 dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
2338 sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
2340 /* If supported by the host, indicate the memory block
2341 * for completion writes / submission reads to shared space
2343 if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
2344 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
2345 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2346 D2H_DMA_INDX_WR_BUF, 0);
2347 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
2348 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2349 H2D_DMA_INDX_RD_BUF, 0);
2352 if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
2353 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
2354 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2355 H2D_DMA_INDX_WR_BUF, 0);
2356 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
2357 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2358 D2H_DMA_INDX_RD_BUF, 0);
2362 * If the DMA-able buffers for flowrings need to come from a specific
2363 * contiguous memory region, then set up prot->flowrings_dma_buf here.
2364 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
2365 * this contiguous memory region, for each of the flowrings.
2368 /* Pre-allocate pool of msgbuf_ring for flowrings */
2369 if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
2373 /* Host should configure soft doorbells if needed ... here */
2375 /* Post to dongle host configured soft doorbells */
2376 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
2378 /* Post buffers for packet reception and ioctl/event responses */
2379 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
2380 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
2381 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
2384 } /* dhd_prot_init */
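
/*
 * Illustrative sketch (not part of the original source): the two-phase
 * construction described above, as seen from the bus layer. The actual call
 * sites live in the bus module; the function below and the return types it
 * assumes for dhd_prot_attach()/dhd_prot_init() are hypothetical.
 */
#ifdef DHD_PROT_PHASES_SKETCH
static int
dhd_prot_phases_sketch(dhd_pub_t *dhd)
{
	if (dhd_prot_attach(dhd) != 0)	/* phase 1: allocate rings, dma bufs */
		return BCME_NOMEM;

	/* ... bus layer downloads firmware and reads the pcie_shared_t
	 * advertized by the dongle (ring sizes, DMA index support, etc.) ...
	 */

	return dhd_prot_init(dhd);	/* phase 2: apply dongle advertized config */
}
#endif /* DHD_PROT_PHASES_SKETCH */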
2388 * dhd_prot_detach - PCIE FD protocol layer destructor.
2389 * Unlinks and frees allocated protocol memory (including dhd_prot).
2392 dhd_prot_detach(dhd_pub_t *dhd)
2394 dhd_prot_t *prot = dhd->prot;
2396 /* Stop the protocol module */
2399 /* free up all DMA-able buffers allocated during prot attach/init */
2401 dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
2402 dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
2403 dhd_dma_buf_free(dhd, &prot->ioctbuf);
2404 dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
2406 /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
2407 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
2408 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
2409 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
2410 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
2412 /* Common MsgBuf Rings */
2413 dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
2414 dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
2415 dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
2416 dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
2417 dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
2419 /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
2420 dhd_prot_flowrings_pool_detach(dhd);
2422 DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
2424 #ifndef CONFIG_DHD_USE_STATIC_BUF
2425 MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
2426 #endif /* CONFIG_DHD_USE_STATIC_BUF */
2429 #if defined(DHD_LB_TXC)
2430 if (prot->tx_compl_prod.buffer) {
2431 MFREE(dhd->osh, prot->tx_compl_prod.buffer,
2432 sizeof(void*) * DHD_LB_WORKQ_SZ);
2434 #endif /* DHD_LB_TXC */
2435 #if defined(DHD_LB_RXC)
2436 if (prot->rx_compl_prod.buffer) {
2437 MFREE(dhd->osh, prot->rx_compl_prod.buffer,
2438 sizeof(void*) * DHD_LB_WORKQ_SZ);
2440 #endif /* DHD_LB_RXC */
2445 } /* dhd_prot_detach */
2449 * dhd_prot_reset - Reset the protocol layer without freeing any objects. This
2450 * may be invoked to soft reboot the dongle, without having to detach and attach
2451 * the entire protocol layer.
2453 * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through
2454 * a dhd_prot_attach() phase.
2457 dhd_prot_reset(dhd_pub_t *dhd)
2459 struct dhd_prot *prot = dhd->prot;
2461 DHD_TRACE(("%s\n", __FUNCTION__));
2467 dhd_prot_flowrings_pool_reset(dhd);
2469 dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
2470 dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
2471 dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
2472 dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
2473 dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
2475 dhd_dma_buf_reset(dhd, &prot->retbuf);
2476 dhd_dma_buf_reset(dhd, &prot->ioctbuf);
2477 dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
2478 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
2479 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
2480 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
2481 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
2484 prot->rx_metadata_offset = 0;
2485 prot->tx_metadata_offset = 0;
2487 prot->rxbufpost = 0;
2488 prot->cur_event_bufs_posted = 0;
2489 prot->cur_ioctlresp_bufs_posted = 0;
2491 prot->active_tx_count = 0;
2492 prot->data_seq_no = 0;
2493 prot->ioctl_seq_no = 0;
2494 prot->ioctl_state = 0;
2495 prot->curr_ioctl_cmd = 0;
2496 prot->ioctl_received = IOCTL_WAIT;
2497 prot->ioctl_trans_id = 0;
2499 /* dhd_flow_rings_init is invoked from dhd_bus_start,
2500 * so flowrings must be deleted when the bus is stopped
2502 if (dhd->flow_rings_inited) {
2503 dhd_flow_rings_deinit(dhd);
2506 if (prot->pktid_map_handle) {
2507 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle);
2508 prot->pktid_map_handle = NULL;
2511 #ifdef IOCTLRESP_USE_CONSTMEM
2512 if (prot->pktid_map_handle_ioctl) {
2513 DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
2514 prot->pktid_map_handle_ioctl = NULL;
2516 #endif /* IOCTLRESP_USE_CONSTMEM */
2517 } /* dhd_prot_reset */
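
/*
 * Illustrative sketch (not part of the original source): a soft reboot of the
 * dongle reuses the objects allocated in dhd_prot_attach(), i.e. no
 * detach/attach round trip is needed. The guard macro is hypothetical and the
 * return type of dhd_prot_init() is assumed.
 */
#ifdef DHD_PROT_RESET_SKETCH
static int
dhd_prot_soft_reboot_sketch(dhd_pub_t *dhd)
{
	dhd_prot_reset(dhd);	/* clear rings and buffers, keep allocations */
	/* ... dongle is rebooted and the pcie_shared_t is read again ... */
	return dhd_prot_init(dhd);	/* re-init without dhd_prot_attach() */
}
#endif /* DHD_PROT_RESET_SKETCH */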
2521 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
2523 dhd_prot_t *prot = dhd->prot;
2524 prot->rx_dataoffset = rx_offset;
2528 * Initialize protocol: sync w/dongle state.
2529 * Sets dongle media info (iswl, drv_version, mac address).
2532 dhd_sync_with_dongle(dhd_pub_t *dhd)
2535 wlc_rev_info_t revinfo;
2538 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2540 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
2544 #ifdef DHD_FW_COREDUMP
2545 /* Check the memdump capability */
2546 dhd_get_memdump_info(dhd);
2547 #endif /* DHD_FW_COREDUMP */
2548 #ifdef BCMASSERT_LOG
2549 dhd_get_assert_info(dhd);
2550 #endif /* BCMASSERT_LOG */
2552 /* Get the device rev info */
2553 memset(&revinfo, 0, sizeof(revinfo));
2554 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
2556 DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
2559 DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
2560 revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
2562 dhd_process_cid_mac(dhd, TRUE);
2564 ret = dhd_preinit_ioctls(dhd);
2567 dhd_process_cid_mac(dhd, FALSE);
2570 /* Always assumes wl for now */
2574 } /* dhd_sync_with_dongle */
2578 /* DHD load balancing: deferral of work to another online CPU */
2580 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
2581 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
2582 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
2583 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
2585 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
2588 * dhd_lb_dispatch - load balance by dispatching work to other CPU cores
2589 * Note: rx_compl_tasklet is dispatched explicitly.
2592 dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
2596 #if defined(DHD_LB_TXC)
2597 case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
2598 bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
2599 dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
2601 #endif /* DHD_LB_TXC */
2603 case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
2605 #if defined(DHD_LB_RXC)
2606 dhd_prot_t *prot = dhdp->prot;
2607 /* Schedule the tasklet only if we have to */
2608 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
2609 /* flush WR index */
2610 bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
2611 dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
2613 #endif /* DHD_LB_RXC */
2614 #if defined(DHD_LB_RXP)
2615 dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
2616 #endif /* DHD_LB_RXP */
2625 #if defined(DHD_LB_TXC)
2627 * DHD load balanced tx completion tasklet handler, that will perform the
2628 * freeing of packets on the selected CPU. Packet pointers are delivered to
2629 * this tasklet via the tx complete workq.
2632 dhd_lb_tx_compl_handler(unsigned long data)
2638 dhd_pub_t *dhd = (dhd_pub_t *)data;
2639 dhd_prot_t *prot = dhd->prot;
2640 bcm_workq_t *workq = &prot->tx_compl_cons;
2643 DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
2646 elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
2648 if (elem_ix == BCM_RING_EMPTY) {
2652 elem = WORKQ_ELEMENT(void *, workq, elem_ix);
2655 DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
2657 OSL_PREFETCH(PKTTAG(pkt));
2660 pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
2661 pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
2663 DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
2665 #if defined(BCMPCIE)
2666 dhd_txcomplete(dhd, pkt, true);
2669 PKTFREE(dhd->osh, pkt, TRUE);
2674 bcm_workq_cons_sync(workq);
2675 DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
2677 #endif /* DHD_LB_TXC */
2679 #if defined(DHD_LB_RXC)
2681 dhd_lb_rx_compl_handler(unsigned long data)
2683 dhd_pub_t *dhd = (dhd_pub_t *)data;
2684 bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
2686 DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
2688 dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
2689 bcm_workq_cons_sync(workq);
2691 #endif /* DHD_LB_RXC */
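
/*
 * Illustrative sketch (not part of the original source): the single-producer
 * single-consumer bcm_workq pattern used by the LB handlers above. The
 * producer (completion ring processing) fills elements and syncs the WR
 * index; the consumer (tasklet on another CPU) drains until BCM_RING_EMPTY,
 * then publishes its RD index back. The guard macro is hypothetical.
 */
#ifdef DHD_LB_WORKQ_USAGE_SKETCH
static void
dhd_lb_workq_usage_sketch(bcm_workq_t *prod, bcm_workq_t *cons, void *pkt)
{
	int elem_ix;
	void **elem;

	/* Producer side */
	elem_ix = bcm_ring_prod(WORKQ_RING(prod), DHD_LB_WORKQ_SZ);
	if (elem_ix != BCM_RING_FULL) {
		elem = WORKQ_ELEMENT(void *, prod, elem_ix);
		*elem = pkt;
		bcm_workq_prod_sync(prod); /* make the WR index visible */
	}

	/* Consumer side (normally runs in the tasklet on another CPU) */
	while ((elem_ix = bcm_ring_cons(WORKQ_RING(cons), DHD_LB_WORKQ_SZ)) !=
	       BCM_RING_EMPTY) {
		elem = WORKQ_ELEMENT(void *, cons, elem_ix);
		/* ... process *elem ... */
	}
	bcm_workq_cons_sync(cons); /* publish the RD index to the producer */
}
#endif /* DHD_LB_WORKQ_USAGE_SKETCH */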
2695 #define DHD_DBG_SHOW_METADATA 0
2697 #if DHD_DBG_SHOW_METADATA
2698 static void BCMFASTPATH
2699 dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
2703 uint8 *tlv_v = (uint8 *)ptr;
2705 if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
2708 len -= BCMPCIE_D2H_METADATA_HDRLEN;
2709 tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
2711 while (len > TLV_HDR_LEN) {
2712 tlv_t = tlv_v[TLV_TAG_OFF];
2713 tlv_l = tlv_v[TLV_LEN_OFF];
2716 tlv_v += TLV_HDR_LEN;
2719 if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
2723 case WLFC_CTL_TYPE_TXSTATUS: {
2725 memcpy(&txs, tlv_v, sizeof(uint32));
2726 if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
2727 printf("METADATA TX_STATUS: %08x\n", txs);
2729 wl_txstatus_additional_info_t tx_add_info;
2730 memcpy(&tx_add_info, tlv_v + sizeof(uint32),
2731 sizeof(wl_txstatus_additional_info_t));
2732 printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
2733 " rate = %08x tries = %d - %d\n", txs,
2734 tx_add_info.seq, tx_add_info.entry_ts,
2735 tx_add_info.enq_ts, tx_add_info.last_ts,
2736 tx_add_info.rspec, tx_add_info.rts_cnt,
2737 tx_add_info.tx_cnt);
2741 case WLFC_CTL_TYPE_RSSI: {
2743 printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
2745 printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
2746 (*(tlv_v + 3) << 8) | *(tlv_v + 2),
2747 (int8)(*tlv_v), *(tlv_v + 1));
2750 case WLFC_CTL_TYPE_FIFO_CREDITBACK:
2751 bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
2754 case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
2755 bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
2758 case WLFC_CTL_TYPE_RX_STAMP: {
2764 memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
2765 printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
2766 rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
2769 case WLFC_CTL_TYPE_TRANS_ID:
2770 bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
2773 case WLFC_CTL_TYPE_COMP_TXSTATUS:
2774 bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
2778 bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
2786 #endif /* DHD_DBG_SHOW_METADATA */
2788 static INLINE void BCMFASTPATH
2789 dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
2792 if (pkttype == PKTTYPE_IOCTL_RX ||
2793 pkttype == PKTTYPE_EVENT_RX) {
2794 #ifdef DHD_USE_STATIC_CTRLBUF
2795 PKTFREE_STATIC(dhd->osh, pkt, send);
2797 PKTFREE(dhd->osh, pkt, send);
2798 #endif /* DHD_USE_STATIC_CTRLBUF */
2800 PKTFREE(dhd->osh, pkt, send);
2805 static INLINE void * BCMFASTPATH
2806 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
2814 #ifdef DHD_PCIE_PKTID
2816 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
2817 pktid, pa, len, dmah, secdma, pkttype);
2819 PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle,
2820 pktid, pa, len, dmah, secdma, pkttype);
2823 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa,
2824 len, dmah, secdma, pkttype);
2825 #endif /* DHD_PCIE_PKTID */
2829 if (SECURE_DMA_ENAB(dhd->osh)) {
2830 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
2833 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
2841 #ifdef IOCTLRESP_USE_CONSTMEM
2842 static INLINE void BCMFASTPATH
2843 dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
2845 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
2846 retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
2847 retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
2851 #endif /* IOCTLRESP_USE_CONSTMEM */
2853 static void BCMFASTPATH
2854 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
2856 dhd_prot_t *prot = dhd->prot;
2861 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
2862 while (fillbufs >= RX_BUF_BURST) {
2865 /* find a better way to reschedule rx buf post if space not available */
2866 DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
2867 DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
2871 /* Post in bursts of up to RX_BUF_BURST buffers at a time */
2872 fillbufs = MIN(fillbufs, RX_BUF_BURST);
2875 retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
2877 if (retcount >= 0) {
2878 prot->rxbufpost += (uint16)retcount;
2880 /* dhd_prot_rxbuf_post returns the number of buffers posted */
2881 DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
2882 #endif /* DHD_LB_RXC */
2883 /* how many more to post */
2884 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
2886 /* Make sure we don't run the loop any further */
2892 /** Post 'count' no of rx buffers to dongle */
2893 static int BCMFASTPATH
2894 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
2897 uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
2898 uint8 *rxbuf_post_tmp;
2899 host_rxbuf_post_t *rxbuf_post;
2905 unsigned long flags;
2907 dhd_prot_t *prot = dhd->prot;
2908 msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
2910 DHD_GENERAL_LOCK(dhd, flags);
2912 /* Claim space for exactly 'count' no of messages, for mitigation purpose */
2913 msg_start = (void *)
2914 dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
2916 DHD_GENERAL_UNLOCK(dhd, flags);
2918 if (msg_start == NULL) {
2919 DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
2922 /* if msg_start != NULL, we should have alloced space for at least 1 item */
2923 ASSERT(alloced > 0);
2925 rxbuf_post_tmp = (uint8*)msg_start;
2927 /* loop through each allocated message in the rxbuf post msgbuf_ring */
2928 for (i = 0; i < alloced; i++) {
2929 rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
2930 /* Create a rx buffer */
2931 if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
2932 DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
2933 dhd->rx_pktgetfail++;
2937 pktlen = PKTLEN(dhd->osh, p);
2938 if (SECURE_DMA_ENAB(dhd->osh)) {
2939 DHD_GENERAL_LOCK(dhd, flags);
2940 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
2941 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
2942 DHD_GENERAL_UNLOCK(dhd, flags);
2944 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
2947 if (PHYSADDRISZERO(pa)) {
2948 if (SECURE_DMA_ENAB(dhd->osh)) {
2949 DHD_GENERAL_LOCK(dhd, flags);
2950 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
2951 ring->dma_buf.secdma, 0);
2952 DHD_GENERAL_UNLOCK(dhd, flags);
2954 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
2957 PKTFREE(dhd->osh, p, FALSE);
2958 DHD_ERROR(("Invalid physaddr 0\n"));
2963 PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
2964 pktlen = PKTLEN(dhd->osh, p);
2966 /* Common msg header */
2967 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
2968 rxbuf_post->cmn_hdr.if_id = 0;
2969 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
2972 #if defined(DHD_LB_RXC)
2973 if (use_rsv_pktid == TRUE) {
2974 bcm_workq_t *workq = &prot->rx_compl_cons;
2975 int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
2976 if (elem_ix == BCM_RING_EMPTY) {
2977 DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
2978 pktid = DHD_PKTID_INVALID;
2981 uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
2985 /* Now populate the previous locker with valid information */
2986 if (pktid != DHD_PKTID_INVALID) {
2987 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
2988 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid,
2989 pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
2993 #endif /* DHD_LB_RXC */
2995 #if defined(DHD_LB_RXC)
2998 #if defined(DHD_PCIE_PKTID)
2999 /* get the lock before calling DHD_NATIVE_TO_PKTID */
3000 DHD_GENERAL_LOCK(dhd, flags);
3002 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa,
3003 pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
3005 #if defined(DHD_PCIE_PKTID)
3007 DHD_GENERAL_UNLOCK(dhd, flags);
3009 if (pktid == DHD_PKTID_INVALID) {
3011 if (SECURE_DMA_ENAB(dhd->osh)) {
3012 DHD_GENERAL_LOCK(dhd, flags);
3013 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3014 ring->dma_buf.secdma, 0);
3015 DHD_GENERAL_UNLOCK(dhd, flags);
3017 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3020 PKTFREE(dhd->osh, p, FALSE);
3021 DHD_ERROR(("Pktid pool depleted.\n"));
3024 #endif /* DHD_PCIE_PKTID */
3027 rxbuf_post->data_buf_len = htol16((uint16)pktlen);
3028 rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3029 rxbuf_post->data_buf_addr.low_addr =
3030 htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
3032 if (prot->rx_metadata_offset) {
3033 rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
3034 rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3035 rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
3037 rxbuf_post->metadata_buf_len = 0;
3038 rxbuf_post->metadata_buf_addr.high_addr = 0;
3039 rxbuf_post->metadata_buf_addr.low_addr = 0;
3042 #if defined(DHD_PKTID_AUDIT_RING)
3043 DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC);
3044 #endif /* DHD_PKTID_AUDIT_RING */
3046 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3048 /* Move rxbuf_post_tmp to next item */
3049 rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
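/* If the loop exited early (i < alloced), roll the WR index back over
 * the unfilled slots, accounting for ring wraparound.
 */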
3053 if (ring->wr < (alloced - i)) {
3054 ring->wr = ring->max_items - (alloced - i);
3056 ring->wr -= (alloced - i);
3062 /* Update ring's WR index and ring doorbell to dongle */
3064 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
3068 } /* dhd_prot_rxbuf_post */
3070 #ifdef IOCTLRESP_USE_CONSTMEM
3072 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3075 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
3077 if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
3078 DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
3087 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3089 /* retbuf (declared on stack) not fully populated ... */
3092 dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
3093 retbuf->len = IOCT_RETBUF_SIZE;
3094 retbuf->_alloced = retbuf->len + dma_pad;
3095 /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle.
3096 * Need to reassign before free to pass the check in dhd_dma_buf_audit().
3098 retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL);
3101 dhd_dma_buf_free(dhd, retbuf);
3104 #endif /* IOCTLRESP_USE_CONSTMEM */
3107 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
3111 ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
3114 dhd_prot_t *prot = dhd->prot;
3116 unsigned long flags;
3117 dhd_dma_buf_t retbuf;
3121 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
3123 if (dhd->busstate == DHD_BUS_DOWN) {
3124 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
3128 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
3131 /* Allocate packet for event buffer post */
3132 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3134 /* Allocate packet for ctrl/ioctl buffer post */
3135 pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
3138 #ifdef IOCTLRESP_USE_CONSTMEM
3140 if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
3141 DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
3144 ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
3146 pktlen = retbuf.len;
3150 #endif /* IOCTLRESP_USE_CONSTMEM */
3152 #ifdef DHD_USE_STATIC_CTRLBUF
3153 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
3155 p = PKTGET(dhd->osh, pktsz, FALSE);
3156 #endif /* DHD_USE_STATIC_CTRLBUF */
3158 DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
3159 __FUNCTION__, __LINE__, event_buf ?
3160 "EVENT" : "IOCTL RESP"));
3161 dhd->rx_pktgetfail++;
3165 pktlen = PKTLEN(dhd->osh, p);
3167 if (SECURE_DMA_ENAB(dhd->osh)) {
3168 DHD_GENERAL_LOCK(dhd, flags);
3169 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
3170 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3171 DHD_GENERAL_UNLOCK(dhd, flags);
3173 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
3176 if (PHYSADDRISZERO(pa)) {
3177 DHD_ERROR(("Invalid physaddr 0\n"));
3179 goto free_pkt_return;
3183 DHD_GENERAL_LOCK(dhd, flags);
3185 rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
3186 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
3188 if (rxbuf_post == NULL) {
3189 DHD_GENERAL_UNLOCK(dhd, flags);
3190 DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
3191 __FUNCTION__, __LINE__));
3193 #ifdef IOCTLRESP_USE_CONSTMEM
3195 #endif /* IOCTLRESP_USE_CONSTMEM */
3197 if (SECURE_DMA_ENAB(dhd->osh)) {
3198 DHD_GENERAL_LOCK(dhd, flags);
3199 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3200 ring->dma_buf.secdma, 0);
3201 DHD_GENERAL_UNLOCK(dhd, flags);
3203 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3206 goto free_pkt_return;
3209 /* CMN msg header */
3211 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
3213 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
3216 #ifdef IOCTLRESP_USE_CONSTMEM
3218 map_handle = dhd->prot->pktid_map_handle_ioctl;
3219 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen,
3220 DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX);
3222 #endif /* IOCTLRESP_USE_CONSTMEM */
3224 map_handle = dhd->prot->pktid_map_handle;
3225 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
3226 p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
3227 event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX);
3230 if (pktid == DHD_PKTID_INVALID) {
3231 if (ring->wr == 0) {
3232 ring->wr = ring->max_items - 1;
3236 DHD_GENERAL_UNLOCK(dhd, flags);
3237 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3238 goto free_pkt_return;
3241 #if defined(DHD_PKTID_AUDIT_RING)
3242 DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
3243 #endif /* DHD_PKTID_AUDIT_RING */
3245 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3246 rxbuf_post->cmn_hdr.if_id = 0;
3247 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
3250 #if defined(DHD_PCIE_PKTID)
3251 if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
3252 if (ring->wr == 0) {
3253 ring->wr = ring->max_items - 1;
3257 DHD_GENERAL_UNLOCK(dhd, flags);
3258 #ifdef IOCTLRESP_USE_CONSTMEM
3260 #endif /* IOCTLRESP_USE_CONSTMEM */
3262 if (SECURE_DMA_ENAB(dhd->osh)) {
3263 DHD_GENERAL_LOCK(dhd, flags);
3264 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3265 ring->dma_buf.secdma, 0);
3266 DHD_GENERAL_UNLOCK(dhd, flags);
3268 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3271 goto free_pkt_return;
3273 #endif /* DHD_PCIE_PKTID */
3275 rxbuf_post->cmn_hdr.flags = 0;
3276 #ifndef IOCTLRESP_USE_CONSTMEM
3277 rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
3279 rxbuf_post->host_buf_len = htol16((uint16)pktlen);
3280 #endif /* IOCTLRESP_USE_CONSTMEM */
3281 rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3282 rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
3284 /* update ring's WR index and ring doorbell to dongle */
3285 dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
3286 DHD_GENERAL_UNLOCK(dhd, flags);
3291 #ifdef IOCTLRESP_USE_CONSTMEM
3293 free_ioctl_return_buffer(dhd, &retbuf);
3295 #endif /* IOCTLRESP_USE_CONSTMEM */
3297 dhd_prot_packet_free(dhd, p,
3298 event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX,
3303 } /* dhd_prot_rxbufpost_ctrl */
3306 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
3311 DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
3313 if (dhd->busstate == DHD_BUS_DOWN) {
3314 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
3318 while (i < max_to_post) {
3319 ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
3325 DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
3330 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
3332 dhd_prot_t *prot = dhd->prot;
3335 DHD_INFO(("ioctl resp buf post\n"));
3336 max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
3337 if (max_to_post <= 0) {
3338 DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
3342 prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
3343 FALSE, max_to_post);
3347 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
3349 dhd_prot_t *prot = dhd->prot;
3352 max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
3353 if (max_to_post <= 0) {
3354 DHD_INFO(("%s: Cannot post more than max event buffers\n",
3358 prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
3362 /** called when DHD needs to check for 'receive complete' messages from the dongle */
3364 dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
3368 msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln;
3370 /* Process all the messages - DTOH direction */
3371 while (!dhd_is_device_removed(dhd)) {
3375 if (dhd->hang_was_sent) {
3380 /* Get the address of the next message to be read from ring */
3381 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
3382 if (msg_addr == NULL) {
3387 /* Prefetch data to populate the cache */
3388 OSL_PREFETCH(msg_addr);
3390 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
3391 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
3392 __FUNCTION__, ring->name, msg_addr, msg_len));
3395 /* Update read pointer */
3396 dhd_prot_upd_read_idx(dhd, ring);
3398 /* After batch processing, check RX bound */
3399 n += msg_len / ring->item_len;
3409 * Hands transmit packets (with a caller-provided flow_id) over to dongle territory (the flow ring)
3412 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
3414 msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
3416 /* Update read pointer */
3417 if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
3418 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
3421 DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
3422 ring->idx, flowid, ring->wr, ring->rd));
3424 /* Need more logic here, but for now use it directly */
3425 dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
3428 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
3430 dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
3434 msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
3436 /* Process all the messages - DTOH direction */
3437 while (!dhd_is_device_removed(dhd)) {
3441 if (dhd->hang_was_sent) {
3446 /* Get the address of the next message to be read from ring */
3447 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
3448 if (msg_addr == NULL) {
3453 /* Prefetch data to populate the cache */
3454 OSL_PREFETCH(msg_addr);
3456 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
3457 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
3458 __FUNCTION__, ring->name, msg_addr, msg_len));
3461 /* Write to dngl rd ptr */
3462 dhd_prot_upd_read_idx(dhd, ring);
3464 /* After batch processing, check bound */
3465 n += msg_len / ring->item_len;
3474 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
3476 dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
3478 dhd_prot_t *prot = dhd->prot;
3479 msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
3481 /* Process all the messages - DTOH direction */
3482 while (!dhd_is_device_removed(dhd)) {
3486 if (dhd->hang_was_sent) {
3490 /* Get the address of the next message to be read from ring */
3491 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
3492 if (msg_addr == NULL) {
3496 /* Prefetch data to populate the cache */
3497 OSL_PREFETCH(msg_addr);
3499 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
3500 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
3501 __FUNCTION__, ring->name, msg_addr, msg_len));
3504 /* Write to dngl rd ptr */
3505 dhd_prot_upd_read_idx(dhd, ring);
3512 * Consume messages out of the D2H ring. Ensure that the message's DMA to host
3513 * memory has completed, before invoking the message handler via a table lookup
3514 * of the cmn_msg_hdr::msg_type.
3516 static int BCMFASTPATH
3517 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
3522 cmn_msg_hdr_t *msg = NULL;
3526 item_len = ring->item_len;
3527 if (item_len == 0) {
3528 DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n",
3529 __FUNCTION__, ring->idx, item_len, buf_len));
3533 while (buf_len > 0) {
3534 if (dhd->hang_was_sent) {
3539 msg = (cmn_msg_hdr_t *)buf;
3542 * Update the curr_rd to the current index in the ring, from where
3543 * the work item is fetched. This way, if the fetched work item
3544 * fails the LIVELOCK check, we can print the exact read index in the
3545 * ring that holds the corrupted work item.
3547 if ((ring->curr_rd + 1) >= ring->max_items) {
3553 #if defined(PCIE_D2H_SYNC)
3554 /* Wait until DMA completes, then fetch msg_type */
3555 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
3557 msg_type = msg->msg_type;
3558 #endif /* !PCIE_D2H_SYNC */
3560 /* Prefetch data to populate the cache */
3561 OSL_PREFETCH(buf + item_len);
3563 DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
3564 msg_type, item_len, buf_len));
3566 if (msg_type == MSG_TYPE_LOOPBACK) {
3567 bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
3568 DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
3571 ASSERT(msg_type < DHD_PROT_FUNCS);
3572 if (msg_type >= DHD_PROT_FUNCS) {
3573 DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n",
3574 __FUNCTION__, msg_type, item_len, buf_len));
3579 if (table_lookup[msg_type]) {
3580 table_lookup[msg_type](dhd, buf);
3583 if (buf_len < item_len) {
3587 buf_len = buf_len - item_len;
3588 buf = buf + item_len;
3593 #ifdef DHD_RX_CHAINING
3594 dhd_rxchain_commit(dhd);
3597 dhd_lb_dispatch(dhd, ring->idx);
3600 } /* dhd_prot_process_msgtype */
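
/*
 * Illustrative sketch (not part of the original source): the shape of the
 * msg_type handler table consulted above. The real table (table_lookup,
 * defined earlier in this file) covers all DHD_PROT_FUNCS entries; the
 * typedef name and the subset of entries shown here are hypothetical.
 */
#ifdef DHD_MSGTYPE_TABLE_SKETCH
typedef void (*dhd_msgbuf_func_sketch_t)(dhd_pub_t *dhd, void *msg);

static dhd_msgbuf_func_sketch_t table_lookup_sketch[DHD_PROT_FUNCS] = {
	[MSG_TYPE_RING_STATUS]      = dhd_prot_ringstatus_process,
	[MSG_TYPE_GEN_STATUS]       = dhd_prot_genstatus_process,
	[MSG_TYPE_IOCTLPTR_REQ_ACK] = dhd_prot_ioctack_process,
	[MSG_TYPE_IOCTL_CMPLT]      = dhd_prot_ioctcmplt_process,
	[MSG_TYPE_TX_STATUS]        = dhd_prot_txstatus_process,
};
#endif /* DHD_MSGTYPE_TABLE_SKETCH */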
3603 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
3608 /** called on MSG_TYPE_RING_STATUS message received from dongle */
3610 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
3612 pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg;
3613 DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
3614 ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
3615 ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
3616 /* How do we track this to pair it with ??? */
3620 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
3622 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
3624 pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
3625 DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
3626 gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
3627 gen_status->compl_hdr.flow_ring_id));
3629 /* How do we track this to pair it with ??? */
3634 * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
3635 * dongle received the ioctl message in dongle memory.
3638 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
3641 ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
3642 unsigned long flags;
3644 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
3646 #if defined(DHD_PKTID_AUDIT_RING)
3647 /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */
3648 if (pktid != DHD_IOCTL_REQ_PKTID) {
3649 if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3650 DHD_TEST_IS_ALLOC) == BCME_ERROR) {
3651 prhex("dhd_prot_ioctack_process:",
3652 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3655 #endif /* DHD_PKTID_AUDIT_RING */
3657 DHD_GENERAL_LOCK(dhd, flags);
3658 if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
3659 (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
3660 dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
3662 DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
3663 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
3664 prhex("dhd_prot_ioctack_process:",
3665 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3667 DHD_GENERAL_UNLOCK(dhd, flags);
3669 DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
3670 ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
3671 ioct_ack->compl_hdr.flow_ring_id));
3672 if (ioct_ack->compl_hdr.status != 0) {
3673 DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
3677 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
3679 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
3681 dhd_prot_t *prot = dhd->prot;
3682 uint32 pkt_id, xt_id;
3683 ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
3685 unsigned long flags;
3686 dhd_dma_buf_t retbuf;
3688 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
3690 pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
3692 #if defined(DHD_PKTID_AUDIT_RING)
3695 #ifndef IOCTLRESP_USE_CONSTMEM
3696 ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id,
3697 DHD_DUPLICATE_FREE);
3699 ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id,
3700 DHD_DUPLICATE_FREE);
3701 #endif /* !IOCTLRESP_USE_CONSTMEM */
3702 if (ret == BCME_ERROR) {
3703 prhex("dhd_prot_ioctcmplt_process:",
3704 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3707 #endif /* DHD_PKTID_AUDIT_RING */
3709 DHD_GENERAL_LOCK(dhd, flags);
3710 if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
3711 !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
3712 DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
3713 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
3714 prhex("dhd_prot_ioctcmplt_process:",
3715 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3716 DHD_GENERAL_UNLOCK(dhd, flags);
3719 #ifndef IOCTLRESP_USE_CONSTMEM
3720 pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
3722 dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
3724 #endif /* !IOCTLRESP_USE_CONSTMEM */
3726 prot->ioctl_state = 0;
3727 DHD_GENERAL_UNLOCK(dhd, flags);
3728 DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
3731 DHD_GENERAL_UNLOCK(dhd, flags);
3733 prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
3734 prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
3735 xt_id = ltoh16(ioct_resp->trans_id);
3736 if (xt_id != prot->ioctl_trans_id) {
3741 DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
3742 pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
3744 if (prot->ioctl_resplen > 0) {
3745 #ifndef IOCTLRESP_USE_CONSTMEM
3746 bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
3748 bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
3749 #endif /* !IOCTLRESP_USE_CONSTMEM */
3752 /* wake up any dhd_os_ioctl_resp_wait() */
3753 dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
3756 #ifndef IOCTLRESP_USE_CONSTMEM
3757 dhd_prot_packet_free(dhd, pkt,
3758 PKTTYPE_IOCTL_RX, FALSE);
3760 free_ioctl_return_buffer(dhd, &retbuf);
3761 #endif /* !IOCTLRESP_USE_CONSTMEM */
3764 /** called on MSG_TYPE_TX_STATUS message received from dongle */
3765 static void BCMFASTPATH
3766 dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
3768 dhd_prot_t *prot = dhd->prot;
3769 host_txbuf_cmpl_t * txstatus;
3770 unsigned long flags;
3778 /* locks required to protect circular buffer accesses */
3779 DHD_GENERAL_LOCK(dhd, flags);
3781 txstatus = (host_txbuf_cmpl_t *)msg;
3782 pktid = ltoh32(txstatus->cmn_hdr.request_id);
3784 #if defined(DHD_PKTID_AUDIT_RING)
3785 if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3786 DHD_DUPLICATE_FREE) == BCME_ERROR) {
3787 prhex("dhd_prot_txstatus_process:",
3788 (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
3790 #endif /* DHD_PKTID_AUDIT_RING */
3792 DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
3793 if (prot->active_tx_count) {
3794 prot->active_tx_count--;
3796 /* Release the wake lock when no more tx packets are pending */
3797 if (prot->active_tx_count == 0)
3798 DHD_OS_WAKE_UNLOCK(dhd);
3801 DHD_ERROR(("Extra packets are freed\n"));
3806 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
3812 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
3813 pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
3815 workq = &prot->tx_compl_prod;
3817 * Produce the packet into the tx_compl workq for the tx compl tasklet
3820 OSL_PREFETCH(PKTTAG(pkt));
3822 /* fetch next available slot in workq */
3823 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3825 DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
3826 DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
3828 if (elem_ix == BCM_RING_FULL) {
3829 DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
3830 goto workq_ring_full;
3833 elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
3838 /* Sync WR index to consumer if the SYNC threshold has been reached */
3839 if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
3840 bcm_workq_prod_sync(workq);
3841 prot->tx_compl_prod_sync = 0;
3844 DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
3845 __FUNCTION__, pkt, prot->tx_compl_prod_sync));
3847 DHD_GENERAL_UNLOCK(dhd, flags);
3853 #endif /* !DHD_LB_TXC */
3856 * We can reach here if DHD_LB_TXC is not enabled, or if DHD_LB_TXC is
3857 * defined but the tx_compl workq is full.
3860 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
3861 pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
3865 if (SECURE_DMA_ENAB(dhd->osh)) {
3867 BCM_REFERENCE(offset);
3869 if (dhd->prot->tx_metadata_offset)
3870 offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
3871 SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
3872 (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
3875 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
3877 #if defined(BCMPCIE)
3878 dhd_txcomplete(dhd, pkt, true);
3881 #if DHD_DBG_SHOW_METADATA
3882 if (dhd->prot->metadata_dbg &&
3883 dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
3885 /* The Ethernet header of the TX frame was copied into the tx descriptor and removed.
3886 * Move the data pointer forward by the Ethernet header size before locating the metadata.
3888 PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
3889 ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
3890 bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
3891 dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
3893 #endif /* DHD_DBG_SHOW_METADATA */
3894 PKTFREE(dhd->osh, pkt, TRUE);
3895 DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
3896 txstatus->tx_status);
3899 DHD_GENERAL_UNLOCK(dhd, flags);
3902 } /* dhd_prot_txstatus_process */
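/*
 * Illustrative sketch, not driver code: the DHD_LB_TXC path above produces
 * completed tx packets into a bounded work queue and publishes the write
 * index to the consumer tasklet only every DHD_LB_WORKQ_SYNC items. A
 * minimal single-producer model of that batched-publish pattern follows;
 * all "example_" names and the guard macro are hypothetical.
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
struct example_workq {
	void *slots[8192];            /* cf. DHD_LB_WORKQ_SZ */
	volatile unsigned int rd;     /* advanced only by the consumer */
	unsigned int wr;              /* producer-private write index */
	volatile unsigned int wr_pub; /* write index visible to the consumer */
	unsigned int pend;            /* items produced since last publish */
};

static int example_workq_produce(struct example_workq *q, void *item)
{
	unsigned int next = (q->wr + 1) % 8192;
	if (next == q->rd)
		return -1;            /* full: mirrors the BCM_RING_FULL slow path */
	q->slots[q->wr] = item;
	q->wr = next;
	if (++q->pend >= 16) {        /* cf. DHD_LB_WORKQ_SYNC */
		q->wr_pub = q->wr;    /* batch-publish to limit cacheline traffic */
		q->pend = 0;
	}
	return 0;
}
#endif /* DHD_MSGBUF_DOC_SKETCH */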
3904 /** called on MSG_TYPE_WL_EVENT message received from dongle */
3906 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
3908 wlevent_req_msg_t *evnt;
3913 unsigned long flags;
3914 dhd_prot_t *prot = dhd->prot;
3916 /* Event complete header */
3917 evnt = (wlevent_req_msg_t *)msg;
3918 bufid = ltoh32(evnt->cmn_hdr.request_id);
3920 #if defined(DHD_PKTID_AUDIT_RING)
3921 if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid,
3922 DHD_DUPLICATE_FREE) == BCME_ERROR) {
3923 prhex("dhd_prot_event_process:",
3924 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
3926 #endif /* DHD_PKTID_AUDIT_RING */
3928 buflen = ltoh16(evnt->event_data_len);
3930 ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
3932 /* Post another event buffer to the dongle */
3933 if (prot->cur_event_bufs_posted) {
3934 prot->cur_event_bufs_posted--;
3936 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
3938 /* locks required to protect pktid_map */
3939 DHD_GENERAL_LOCK(dhd, flags);
3940 pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
3941 DHD_GENERAL_UNLOCK(dhd, flags);
3947 /* DMA RX offset updated through shared area */
3948 if (dhd->prot->rx_dataoffset) {
3949 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
3952 PKTSETLEN(dhd->osh, pkt, buflen);
3954 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
3957 /** called on MSG_TYPE_RX_CMPLT message received from dongle */
3958 static void BCMFASTPATH
3959 dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg)
3961 host_rxbuf_cmpl_t *rxcmplt_h;
3962 uint16 data_offset; /* offset at which data starts */
3964 unsigned long flags;
3967 #if defined(DHD_LB_RXC)
3968 const bool free_pktid = FALSE;
3970 const bool free_pktid = TRUE;
3971 #endif /* DHD_LB_RXC */
3974 rxcmplt_h = (host_rxbuf_cmpl_t *)msg;
3976 /* offset from which data starts is populated in rxstatus0 */
3977 data_offset = ltoh16(rxcmplt_h->data_offset);
3979 pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id);
3981 #if defined(DHD_PKTID_AUDIT_RING)
3982 if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3983 DHD_DUPLICATE_FREE) == BCME_ERROR) {
3984 prhex("dhd_prot_rxcmplt_process:",
3985 (uchar *)msg, D2HRING_RXCMPLT_ITEMSIZE);
3987 #endif /* DHD_PKTID_AUDIT_RING */
3989 DHD_GENERAL_LOCK(dhd, flags);
3990 pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid);
3991 DHD_GENERAL_UNLOCK(dhd, flags);
3997 /* Post another set of rxbufs to the device */
3998 dhd_prot_return_rxbuf(dhd, pktid, 1);
4000 DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
4001 ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
4002 rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
4003 ltoh16(rxcmplt_h->metadata_len)));
4004 #if DHD_DBG_SHOW_METADATA
4005 if (dhd->prot->metadata_dbg &&
4006 dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
4008 ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
4009 /* header followed by data */
4010 bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
4011 dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
4013 #endif /* DHD_DBG_SHOW_METADATA */
4015 if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
4016 DHD_INFO(("D11 frame rxed \n"));
4019 /* data_offset from buf start */
4021 /* data offset given from dongle after split rx */
4022 PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
4024 /* DMA RX offset updated through shared area */
4025 if (dhd->prot->rx_dataoffset) {
4026 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
4029 /* Actual length of the packet */
4030 PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
4032 ifidx = rxcmplt_h->cmn_hdr.if_id;
4034 #if defined(DHD_LB_RXP)
4035 dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
4036 #else /* ! DHD_LB_RXP */
4037 #ifdef DHD_RX_CHAINING
4038 /* Chain the packets */
4039 dhd_rxchain_frame(dhd, pkt, ifidx);
4040 #else /* ! DHD_RX_CHAINING */
4041 /* offset from which data starts is populated in rxstatus0 */
4042 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
4043 #endif /* ! DHD_RX_CHAINING */
4044 #endif /* ! DHD_LB_RXP */
4045 } /* dhd_prot_rxcmplt_process */
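/*
 * Illustrative sketch, not driver code: the rx completion path above appears
 * to trim a received buffer by the per-packet data_offset reported by the
 * dongle, falling back to the global rx_dataoffset from the shared area, and
 * then clamps the packet length to data_len. A plain-C model of that pointer
 * arithmetic, with hypothetical "example_" names:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static unsigned char *example_rx_payload(unsigned char *buf,
	unsigned int data_offset, unsigned int rx_dataoffset,
	unsigned int data_len, unsigned int *payload_len)
{
	unsigned char *p = buf;
	if (data_offset)
		p += data_offset;      /* per-packet offset (PKTPULL above) */
	else if (rx_dataoffset)
		p += rx_dataoffset;    /* global offset from the shared area */
	*payload_len = data_len;       /* actual length (PKTSETLEN above) */
	return p;
}
#endif /* DHD_MSGBUF_DOC_SKETCH */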
4047 /** Stop protocol: sync w/dongle state. */
4048 void dhd_prot_stop(dhd_pub_t *dhd)
4051 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4055 /* Add any protocol-specific data header.
4056 * Caller must reserve prot_hdrlen prepend space.
4059 dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
4065 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
4071 #define PKTBUF pktbuf
4074 * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
4075 * the corresponding flow ring.
4078 dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
4080 unsigned long flags;
4081 dhd_prot_t *prot = dhd->prot;
4082 host_txbuf_post_t *txdesc = NULL;
4083 dmaaddr_t pa, meta_pa;
4091 msgbuf_ring_t *ring;
4092 flow_ring_table_t *flow_ring_table;
4093 flow_ring_node_t *flow_ring_node;
4095 if (dhd->flow_ring_table == NULL) {
4096 return BCME_NORESOURCE;
4099 flowid = DHD_PKT_GET_FLOWID(PKTBUF);
4101 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
4102 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
4104 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
4107 DHD_GENERAL_LOCK(dhd, flags);
4109 /* Create a unique 32-bit packet id */
4110 pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF);
4111 #if defined(DHD_PCIE_PKTID)
4112 if (pktid == DHD_PKTID_INVALID) {
4113 DHD_ERROR(("Pktid pool depleted.\n"));
4115 * If we returned an error here, the caller would queue the packet
4116 * again, so we just free the skb allocated in the DMA zone.
4117 * Since the original SKB has not been freed yet, the caller can safely retry it.
4120 goto err_no_res_pktfree;
4122 #endif /* DHD_PCIE_PKTID */
4124 /* Reserve space in the circular buffer */
4125 txdesc = (host_txbuf_post_t *)
4126 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4127 if (txdesc == NULL) {
4128 #if defined(DHD_PCIE_PKTID)
4131 /* Free up the PKTID. physaddr and pktlen will be garbage. */
4132 DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid,
4133 pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
4134 #endif /* DHD_PCIE_PKTID */
4135 DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
4136 __FUNCTION__, __LINE__, prot->active_tx_count));
4137 goto err_no_res_pktfree;
4140 /* Extract the data pointer and length information */
4141 pktdata = PKTDATA(dhd->osh, PKTBUF);
4142 pktlen = PKTLEN(dhd->osh, PKTBUF);
4144 /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
4145 bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
4147 /* Extract the ethernet header and adjust the data pointer and length */
4148 pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
4149 pktlen -= ETHER_HDR_LEN;
4151 /* Map the data pointer to a DMA-able address */
4152 if (SECURE_DMA_ENAB(dhd->osh)) {
4154 BCM_REFERENCE(offset);
4156 if (prot->tx_metadata_offset) {
4157 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
4160 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
4161 DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
4163 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
4166 if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) {
4167 DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
4171 /* No need to lock. Save the rest of the packet's metadata */
4172 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid,
4173 pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
4175 #ifdef TXP_FLUSH_NITEMS
4176 if (ring->pend_items_count == 0) {
4177 ring->start_addr = (void *)txdesc;
4179 ring->pend_items_count++;
4182 /* Form the Tx descriptor message buffer */
4184 /* Common message hdr */
4185 txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
4186 txdesc->cmn_hdr.if_id = ifidx;
4188 txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
4189 prio = (uint8)PKTPRIO(PKTBUF);
4192 txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
4193 txdesc->seg_cnt = 1;
4195 txdesc->data_len = htol16((uint16) pktlen);
4196 txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4197 txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4199 /* Move data pointer to keep ether header in local PKTBUF for later reference */
4200 PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
4202 /* Handle Tx metadata */
4203 headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
4204 if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
4205 DHD_ERROR(("No headroom for Metadata tx %d %d\n",
4206 prot->tx_metadata_offset, headroom));
4209 if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
4210 DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
4212 /* Adjust the data pointer to account for meta data in DMA_MAP */
4213 PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
4215 if (SECURE_DMA_ENAB(dhd->osh)) {
4216 meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
4217 prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
4218 0, ring->dma_buf.secdma);
4220 meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
4221 prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
4224 if (PHYSADDRISZERO(meta_pa)) {
4225 DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
4229 /* Adjust the data pointer back to original value */
4230 PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
4232 txdesc->metadata_buf_len = prot->tx_metadata_offset;
4233 txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
4234 txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
4236 txdesc->metadata_buf_len = htol16(0);
4237 txdesc->metadata_buf_addr.high_addr = 0;
4238 txdesc->metadata_buf_addr.low_addr = 0;
4241 #if defined(DHD_PKTID_AUDIT_RING)
4242 DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid,
4243 DHD_DUPLICATE_ALLOC);
4244 #endif /* DHD_PKTID_AUDIT_RING */
4246 txdesc->cmn_hdr.request_id = htol32(pktid);
4248 DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
4249 txdesc->cmn_hdr.request_id));
4251 /* Update the write pointer in TCM & ring bell */
4252 #ifdef TXP_FLUSH_NITEMS
4253 /* Flush if we have either hit the txp_threshold or if this msg is */
4254 /* occupying the last slot in the flow_ring - before wrap around. */
4255 if ((ring->pend_items_count == prot->txp_threshold) ||
4256 ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
4257 dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
4260 /* update ring's WR index and ring doorbell to dongle */
4261 dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
4264 prot->active_tx_count++;
4267 * Take a wake lock; do not sleep while at least one tx packet is in transit
4270 if (prot->active_tx_count == 1)
4271 DHD_OS_WAKE_LOCK(dhd);
4273 DHD_GENERAL_UNLOCK(dhd, flags);
4281 DHD_GENERAL_UNLOCK(dhd, flags);
4282 return BCME_NORESOURCE;
4283 } /* dhd_prot_txdata */
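/*
 * Illustrative sketch, not driver code: the pktid map used in
 * dhd_prot_txdata is a two-phase allocator -- an id is reserved first
 * (DHD_NATIVE_TO_PKTID_RSV) so a ring slot can be claimed, and the DMA
 * metadata is saved against the id (DHD_NATIVE_TO_PKTID_SAVE) only after
 * the buffer is mapped. A hypothetical minimal model:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
struct example_pktid_entry {
	void *pkt;
	unsigned long long pa; /* DMA address, valid only after "save" */
	unsigned int len;
	int in_use;
};

static unsigned int example_pktid_reserve(struct example_pktid_entry *map,
	unsigned int nentries, void *pkt)
{
	unsigned int id;
	for (id = 1; id < nentries; id++) { /* 0 is kept as the invalid id */
		if (!map[id].in_use) {
			map[id].in_use = 1;
			map[id].pkt = pkt;
			return id;
		}
	}
	return 0; /* pool depleted, cf. DHD_PKTID_INVALID above */
}

static void example_pktid_save(struct example_pktid_entry *map,
	unsigned int id, unsigned long long pa, unsigned int len)
{
	map[id].pa = pa;  /* filled in only after DMA mapping succeeds */
	map[id].len = len;
}
#endif /* DHD_MSGBUF_DOC_SKETCH */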
4285 /* may be called with the general lock held; see the in_lock parameter */
4286 /** optimization to write "n" tx items at a time to ring */
4288 dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
4290 #ifdef TXP_FLUSH_NITEMS
4291 unsigned long flags = 0;
4292 flow_ring_table_t *flow_ring_table;
4293 flow_ring_node_t *flow_ring_node;
4294 msgbuf_ring_t *ring;
4296 if (dhd->flow_ring_table == NULL) {
4301 DHD_GENERAL_LOCK(dhd, flags);
4304 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
4305 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
4306 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
4308 if (ring->pend_items_count) {
4309 /* update ring's WR index and ring doorbell to dongle */
4310 dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
4311 ring->pend_items_count);
4312 ring->pend_items_count = 0;
4313 ring->start_addr = NULL;
4317 DHD_GENERAL_UNLOCK(dhd, flags);
4319 #endif /* TXP_FLUSH_NITEMS */
4322 #undef PKTBUF /* Only defined in the above routine */
4325 dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
4330 /** post a set of receive buffers to the dongle */
4331 static void BCMFASTPATH
4332 dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
4334 dhd_prot_t *prot = dhd->prot;
4335 #if defined(DHD_LB_RXC)
4340 workq = &prot->rx_compl_prod;
4342 /* Produce the work item */
4343 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4344 if (elem_ix == BCM_RING_FULL) {
4345 DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
4350 elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
4355 /* Sync WR index to consumer if the SYNC threshold has been reached */
4356 if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
4357 bcm_workq_prod_sync(workq);
4358 prot->rx_compl_prod_sync = 0;
4361 DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
4362 __FUNCTION__, pktid, prot->rx_compl_prod_sync));
4364 #endif /* DHD_LB_RXC */
4367 if (prot->rxbufpost >= rxcnt) {
4368 prot->rxbufpost -= rxcnt;
4371 prot->rxbufpost = 0;
4374 #if !defined(DHD_LB_RXC)
4375 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
4376 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4378 #endif /* !DHD_LB_RXC */
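/*
 * Illustrative sketch, not driver code: dhd_prot_return_rxbuf above
 * decrements the posted-buffer count by the number consumed and reposts a
 * fresh batch once the shortfall reaches RXBUFPOST_THRESHOLD. Hypothetical
 * model of that accounting:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static int example_need_rxpost(unsigned int posted, unsigned int consumed,
	unsigned int max_post, unsigned int threshold)
{
	posted = (posted >= consumed) ? (posted - consumed) : 0;
	/* repost once the deficit against max_post reaches the threshold */
	return posted <= (max_post - threshold);
}
#endif /* DHD_MSGBUF_DOC_SKETCH */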
4381 /* called before an ioctl is sent to the dongle */
4383 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
4385 dhd_prot_t *prot = dhd->prot;
4387 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
4389 pcie_bus_tput_params_t *tput_params;
4391 slen = strlen("pcie_bus_tput") + 1;
4392 tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
4393 bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
4394 sizeof(tput_params->host_buf_addr));
4395 tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
4400 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
4401 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
4406 if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
4407 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
4411 if (dhd->busstate == DHD_BUS_SUSPEND) {
4412 DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
4416 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4418 if (ioc->cmd == WLC_SET_PM) {
4419 DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf));
4422 ASSERT(len <= WLC_IOCTL_MAXLEN);
4424 if (len > WLC_IOCTL_MAXLEN) {
4430 dhd_prot_wlioctl_intercept(dhd, ioc, buf);
4432 if (action & WL_IOCTL_ACTION_SET) {
4433 ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
4435 ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
4441 /* Too many programs assume ioctl() returns 0 on success */
4445 DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
4446 dhd->dongle_error = ret;
4449 if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
4450 /* Intercept the wme_dp ioctl here */
4451 if (!strcmp(buf, "wme_dp")) {
4454 slen = strlen("wme_dp") + 1;
4455 if (len >= (int)(slen + sizeof(int))) {
4456 bcopy(((char *)buf + slen), &val, sizeof(int));
4458 dhd->wme_dp = (uint8) ltoh32(val);
4466 } /* dhd_prot_ioctl */
4468 /** test / loopback */
4471 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
4473 unsigned long flags;
4474 dhd_prot_t *prot = dhd->prot;
4477 ioct_reqst_hdr_t *ioct_rqst;
4479 uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
4480 uint16 msglen = len + hdrlen;
4481 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4483 msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
4484 msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
4486 DHD_GENERAL_LOCK(dhd, flags);
4488 ioct_rqst = (ioct_reqst_hdr_t *)
4489 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4491 if (ioct_rqst == NULL) {
4492 DHD_GENERAL_UNLOCK(dhd, flags);
4500 ptr = (uint8 *)ioct_rqst;
4501 for (i = 0; i < msglen; i++) {
4506 /* Common msg buf hdr */
4507 ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4510 ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
4511 ioct_rqst->msg.if_id = 0;
4513 bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
4515 /* update ring's WR index and ring doorbell to dongle */
4516 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
4517 DHD_GENERAL_UNLOCK(dhd, flags);
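/*
 * Illustrative sketch, not driver code: the loopback request length above is
 * rounded up to the DMA alignment and capped at the maximum message size.
 * The exact ALIGN_SIZE/LIMIT_TO_MAX macro definitions are not shown in this
 * excerpt, so the helpers below are an assumed model with hypothetical names:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static unsigned int example_align_size(unsigned int len, unsigned int align)
{
	return (len + align - 1) & ~(align - 1); /* align must be a power of 2 */
}

static unsigned int example_limit_to_max(unsigned int v, unsigned int max)
{
	return (v > max) ? max : v;
}
#endif /* DHD_MSGBUF_DOC_SKETCH */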
4522 /** test / loopback */
4523 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
4525 if (dmaxfer == NULL) {
4529 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
4530 dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
4533 /** test / loopback */
4534 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
4535 uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
4542 /* First free up existing buffers */
4543 dmaxfer_free_dmaaddr(dhd, dmaxfer);
4545 if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
4549 if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
4550 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
4556 /* Populate source with a pattern */
4557 for (i = 0; i < dmaxfer->len; i++) {
4558 ((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
4560 OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
4562 dmaxfer->srcdelay = srcdelay;
4563 dmaxfer->destdelay = destdelay;
4566 } /* dmaxfer_prepare_dmaaddr */
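/*
 * Illustrative sketch, not driver code: the loopback DMA test fills the
 * source buffer with a rolling byte pattern, and on completion the
 * destination is compared against the source (see
 * dhd_msgbuf_dmaxfer_process below). A hypothetical helper pair modelling
 * that fill-and-verify check:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static void example_dmaxfer_fill(unsigned char *src, unsigned int len)
{
	unsigned int i;
	for (i = 0; i < len; i++)
		src[i] = (unsigned char)(i % 256); /* same pattern as above */
}

static int example_dmaxfer_verify(const unsigned char *src,
	const unsigned char *dst, unsigned int len)
{
	unsigned int i;
	for (i = 0; i < len; i++) {
		if (src[i] != dst[i])
			return -1; /* mismatch: the DMA loopback failed */
	}
	return 0;
}
#endif /* DHD_MSGBUF_DOC_SKETCH */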
4569 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
4571 dhd_prot_t *prot = dhd->prot;
4573 OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
4574 if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
4575 if (memcmp(prot->dmaxfer.srcmem.va,
4576 prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
4577 bcm_print_bytes("XFER SRC: ",
4578 prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
4579 bcm_print_bytes("XFER DST: ",
4580 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
4582 DHD_INFO(("DMA successful\n"));
4585 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
4586 dhd->prot->dmaxfer.in_progress = FALSE;
4589 /** Test functionality.
4590 * Transfers bytes from host to dongle and to host again using DMA
4591 * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
4595 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
4597 unsigned long flags;
4599 dhd_prot_t *prot = dhd->prot;
4600 pcie_dma_xfer_params_t *dmap;
4601 uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
4603 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4605 if (prot->dmaxfer.in_progress) {
4606 DHD_ERROR(("DMA is in progress...\n"));
4610 prot->dmaxfer.in_progress = TRUE;
4611 if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
4612 &prot->dmaxfer)) != BCME_OK) {
4613 prot->dmaxfer.in_progress = FALSE;
4617 DHD_GENERAL_LOCK(dhd, flags);
4619 dmap = (pcie_dma_xfer_params_t *)
4620 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4623 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
4624 prot->dmaxfer.in_progress = FALSE;
4625 DHD_GENERAL_UNLOCK(dhd, flags);
4629 /* Common msg buf hdr */
4630 dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
4631 dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
4632 dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4635 dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
4636 dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
4637 dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
4638 dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
4639 dmap->xfer_len = htol32(prot->dmaxfer.len);
4640 dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
4641 dmap->destdelay = htol32(prot->dmaxfer.destdelay);
4643 /* update ring's WR index and ring doorbell to dongle */
4644 dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
4645 DHD_GENERAL_UNLOCK(dhd, flags);
4647 DHD_ERROR(("DMA Started...\n"));
4650 } /* dhdmsgbuf_dmaxfer_req */
4652 /** Called in the process of submitting an ioctl to the dongle */
4654 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
4658 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4660 /* Respond "bcmerror" and "bcmerrorstr" with local cache */
4661 if (cmd == WLC_GET_VAR && buf)
4663 if (!strcmp((char *)buf, "bcmerrorstr"))
4665 strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
4668 else if (!strcmp((char *)buf, "bcmerror"))
4670 *(int *)buf = dhd->dongle_error;
4675 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
4677 DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
4678 action, ifidx, cmd, len));
4680 /* wait for IOCTL completion message from dongle and get first fragment */
4681 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
4688 * Waits for an IOCTL completion message from the dongle and copies it into the
4689 * caller-provided parameter 'buf'.
4692 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
4694 dhd_prot_t *prot = dhd->prot;
4696 unsigned long flags;
4699 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4701 if (dhd->dongle_reset) {
4706 if (prot->cur_ioctlresp_bufs_posted) {
4707 prot->cur_ioctlresp_bufs_posted--;
4710 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
4712 timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
4713 if (timeleft == 0) {
4714 dhd->rxcnt_timeout++;
4716 DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
4717 "trans_id %d state %d busstate=%d ioctl_received=%d\n",
4718 __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
4719 prot->ioctl_trans_id, prot->ioctl_state,
4720 dhd->busstate, prot->ioctl_received));
4722 dhd_prot_debug_info_print(dhd);
4724 #ifdef DHD_FW_COREDUMP
4725 /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */
4726 if (dhd->memdump_enabled && !dhd->dongle_trap_occured) {
4727 /* collect core dump */
4728 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
4729 dhd_bus_mem_dump(dhd);
4731 #endif /* DHD_FW_COREDUMP */
4732 if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
4733 #ifdef SUPPORT_LINKDOWN_RECOVERY
4734 #ifdef CONFIG_ARCH_MSM
4735 dhd->bus->no_cfg_restore = 1;
4736 #endif /* CONFIG_ARCH_MSM */
4737 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4738 DHD_ERROR(("%s: timeout > MAX_CNTL_TX_TIMEOUT\n", __FUNCTION__));
4743 if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
4744 DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
4745 __FUNCTION__, prot->ioctl_received));
4746 ret = -ECONNABORTED;
4749 dhd->rxcnt_timeout = 0;
4751 DHD_CTL(("%s: ioctl resp resumed, got %d\n",
4752 __FUNCTION__, prot->ioctl_resplen));
4755 if (dhd->dongle_trap_occured) {
4756 #ifdef SUPPORT_LINKDOWN_RECOVERY
4757 #ifdef CONFIG_ARCH_MSM
4758 dhd->bus->no_cfg_restore = 1;
4759 #endif /* CONFIG_ARCH_MSM */
4760 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4761 DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__));
4766 if (dhd->prot->ioctl_resplen > len) {
4767 dhd->prot->ioctl_resplen = (uint16)len;
4770 bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
4773 ret = (int)(dhd->prot->ioctl_status);
4775 DHD_GENERAL_LOCK(dhd, flags);
4776 dhd->prot->ioctl_state = 0;
4777 dhd->prot->ioctl_resplen = 0;
4778 dhd->prot->ioctl_received = IOCTL_WAIT;
4779 dhd->prot->curr_ioctl_cmd = 0;
4780 DHD_GENERAL_UNLOCK(dhd, flags);
4783 } /* dhd_msgbuf_wait_ioctl_cmplt */
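/*
 * Illustrative sketch, not driver code: the completion path above clamps the
 * dongle's response length to the caller's buffer size before copying, so an
 * oversized response can never overrun 'buf'. Hypothetical byte-wise model:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static unsigned int example_copy_resp(unsigned char *dst, unsigned int dst_len,
	const unsigned char *src, unsigned int resp_len)
{
	unsigned int i;
	unsigned int n = (resp_len > dst_len) ? dst_len : resp_len;
	for (i = 0; dst != 0 && i < n; i++)
		dst[i] = src[i]; /* truncate; never overflow the caller's buffer */
	return n;                /* bytes actually delivered to the caller */
}
#endif /* DHD_MSGBUF_DOC_SKETCH */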
4786 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
4790 DHD_TRACE(("%s: Enter \n", __FUNCTION__));
4792 if (dhd->busstate == DHD_BUS_DOWN) {
4793 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
4797 /* don't talk to the dongle if fw is about to be reloaded */
4798 if (dhd->hang_was_sent) {
4799 DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
4804 /* Fill up msgbuf for ioctl req */
4805 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
4807 DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
4808 action, ifidx, cmd, len));
4810 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
4815 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
4816 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
4821 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
4822 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
4823 void *params, int plen, void *arg, int len, bool set)
4825 return BCME_UNSUPPORTED;
4828 /** Add prot dump output to a buffer */
4829 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
4832 #if defined(PCIE_D2H_SYNC)
4833 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
4834 bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
4835 else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
4836 bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
4838 bcm_bprintf(b, "\nd2h_sync: NONE:");
4839 bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
4840 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
4841 #endif /* PCIE_D2H_SYNC */
4843 bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
4844 DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support),
4845 DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support),
4846 dhd->prot->rw_index_sz);
4849 /* Update local copy of dongle statistics */
4850 void dhd_prot_dstats(dhd_pub_t *dhd)
4855 /** Called by upper DHD layer */
4856 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
4857 uint reorder_info_len, void **pkt, uint32 *free_buf_count)
4862 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
4864 dhd_post_dummy_msg(dhd_pub_t *dhd)
4866 unsigned long flags;
4867 hostevent_hdr_t *hevent = NULL;
4870 dhd_prot_t *prot = dhd->prot;
4871 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4873 DHD_GENERAL_LOCK(dhd, flags);
4875 hevent = (hostevent_hdr_t *)
4876 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4878 if (hevent == NULL) {
4879 DHD_GENERAL_UNLOCK(dhd, flags);
4883 /* CMN msg header */
4884 hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4886 hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
4887 hevent->msg.if_id = 0;
4890 hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
4892 /* Since we fill the data directly into the buffer pointer obtained
4893 * from the msgbuf ring, we can call write_complete directly
4895 dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
4896 DHD_GENERAL_UNLOCK(dhd, flags);
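/*
 * Illustrative sketch, not driver code: every H2D message above carries an
 * epoch equal to the ring sequence number modulo H2D_EPOCH_MODULO, which
 * lets the dongle detect stale or re-read ring slots. Hypothetical model of
 * that per-message sequencing:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static unsigned char example_next_epoch(unsigned int *seqnum,
	unsigned int modulo)
{
	unsigned char epoch = (unsigned char)(*seqnum % modulo);
	(*seqnum)++; /* advance for the next message posted on this ring */
	return epoch;
}
#endif /* DHD_MSGBUF_DOC_SKETCH */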
4902 * If exactly_nitems is true, this function will allocate space for nitems or fail
4903 * If exactly_nitems is false, this function will allocate space for nitems or less
4905 static void * BCMFASTPATH
4906 dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
4907 uint16 nitems, uint16 * alloced, bool exactly_nitems)
4911 /* Alloc space for nitems in the ring */
4912 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
4914 if (ret_buf == NULL) {
4915 /* if alloc failed, invalidate cached read ptr */
4916 if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
4917 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
4919 dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
4922 /* Try allocating once more */
4923 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
4925 if (ret_buf == NULL) {
4926 DHD_INFO(("%s: Ring space not available \n", ring->name));
4931 /* Return alloced space */
4936 * Non-inline ioctl request.
4937 * First form an ioctl request in the circular buffer, as per the ioctptr_reqst_hdr_t header.
4938 * Then form a separate request buffer, with a 4-byte cmn header added at the front;
4939 * the buf contents from the parent function are copied into the remaining section of this buffer.
4942 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
4944 dhd_prot_t *prot = dhd->prot;
4945 ioctl_req_msg_t *ioct_rqst;
4946 void * ioct_buf; /* For ioctl payload */
4947 uint16 rqstlen, resplen;
4948 unsigned long flags;
4950 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4955 /* Limit ioctl request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
4956 /* (an 8K allocation of the dongle buffer fails) */
4957 /* dhd doesn't give separate input & output buf lens, */
4958 /* so assume the input length can never be more than 1.5K */
4959 rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
4961 DHD_GENERAL_LOCK(dhd, flags);
4963 if (prot->ioctl_state) {
4964 DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
4965 DHD_GENERAL_UNLOCK(dhd, flags);
4968 prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
4971 /* Request for cbuf space */
4972 ioct_rqst = (ioctl_req_msg_t*)
4973 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4974 if (ioct_rqst == NULL) {
4975 DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
4976 prot->ioctl_state = 0;
4977 prot->curr_ioctl_cmd = 0;
4978 prot->ioctl_received = IOCTL_WAIT;
4979 DHD_GENERAL_UNLOCK(dhd, flags);
4983 /* Common msg buf hdr */
4984 ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
4985 ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
4986 ioct_rqst->cmn_hdr.flags = 0;
4987 ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
4988 ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4991 ioct_rqst->cmd = htol32(cmd);
4992 prot->curr_ioctl_cmd = cmd;
4993 ioct_rqst->output_buf_len = htol16(resplen);
4994 prot->ioctl_trans_id++;
4995 ioct_rqst->trans_id = prot->ioctl_trans_id;
4997 /* populate ioctl buffer info */
4998 ioct_rqst->input_buf_len = htol16(rqstlen);
4999 ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
5000 ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
5001 /* copy ioct payload */
5002 ioct_buf = (void *) prot->ioctbuf.va;
5005 memcpy(ioct_buf, buf, len);
5008 OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
5010 if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
5011 DHD_ERROR(("host ioct address unaligned !!!!! \n"));
5014 DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
5015 ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
5016 ioct_rqst->trans_id));
5018 /* update ring's WR index and ring doorbell to dongle */
5019 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
5020 DHD_GENERAL_UNLOCK(dhd, flags);
5023 } /* dhd_fillup_ioct_reqst */
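/*
 * Illustrative sketch, not driver code: as dhd_fillup_ioct_reqst shows, an
 * ioctl request is split into a fixed-size ring message (command, lengths,
 * transaction id, and the host buffer address) plus a separate DMA-able
 * payload buffer. The struct below is a hypothetical, simplified layout for
 * illustration only; the real message is ioctl_req_msg_t.
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
struct example_ioctl_req {
	unsigned char  msg_type;    /* cf. MSG_TYPE_IOCTLPTR_REQ */
	unsigned char  if_id;       /* interface index */
	unsigned short trans_id;    /* matched against the completion */
	unsigned int   cmd;         /* ioctl command code */
	unsigned short input_len;   /* bytes valid in the host payload buffer */
	unsigned short output_len;  /* response space the host can accept */
	unsigned int   buf_addr_lo; /* PA of the DMA-able payload buffer */
	unsigned int   buf_addr_hi;
};
#endif /* DHD_MSGBUF_DOC_SKETCH */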
5027 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
5028 * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
5029 * information is posted to the dongle.
5031 * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
5032 * each flowring in pool of flowrings.
5034 * returns BCME_OK=0 on success
5035 * returns non-zero negative error value on failure.
5038 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
5039 uint16 max_items, uint16 item_len, uint16 ringid)
5041 int dma_buf_alloced = BCME_NOMEM;
5042 uint32 dma_buf_len = max_items * item_len;
5043 dhd_prot_t *prot = dhd->prot;
5047 ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
5050 strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
5051 ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
5055 ring->max_items = max_items;
5056 ring->item_len = item_len;
5058 /* A contiguous space may be reserved for all flowrings */
5059 if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) {
5060 /* Carve out from the contiguous DMA-able flowring buffer */
5064 dhd_dma_buf_t *dma_buf = &ring->dma_buf;
5065 dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
5067 flowid = DHD_RINGID_TO_FLOWID(ringid);
5068 base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
5070 ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
5072 dma_buf->len = dma_buf_len;
5073 dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
5074 PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
5075 PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
5077 /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
5078 ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
5080 dma_buf->dmah = rsv_buf->dmah;
5081 dma_buf->secdma = rsv_buf->secdma;
5083 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
5085 /* Allocate a dhd_dma_buf */
5086 dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
5087 if (dma_buf_alloced != BCME_OK) {
5092 /* CAUTION: Save ring::base_addr in little endian format! */
5093 dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
5095 #ifdef BCM_SECURE_DMA
5096 if (SECURE_DMA_ENAB(prot->osh)) {
5097 ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
5098 if (ring->dma_buf.secdma == NULL) {
5102 #endif /* BCM_SECURE_DMA */
5104 DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
5105 "ring start %p buf phys addr %x:%x \n",
5106 ring->name, ring->max_items, ring->item_len,
5107 dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
5108 ltoh32(ring->base_addr.low_addr)));
5112 #ifdef BCM_SECURE_DMA
5114 if (dma_buf_alloced == BCME_OK) {
5115 dhd_dma_buf_free(dhd, &ring->dma_buf);
5117 #endif /* BCM_SECURE_DMA */
5121 } /* dhd_prot_ring_attach */
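/*
 * Illustrative sketch, not driver code: when a contiguous region is reserved
 * for all flowrings, each ring's DMA buffer is carved at a byte offset
 * computed from its flowid, and only the low 32 bits of the physical address
 * change because the region never crosses a 4GB boundary (see the asserts
 * above). Hypothetical model of that offset math:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static unsigned long long example_carve_pa(unsigned long long region_pa,
	unsigned int flowid, unsigned int n_common_rings,
	unsigned int ring_buf_len)
{
	unsigned long long off =
	    (unsigned long long)(flowid - n_common_rings) * ring_buf_len;
	/* the region is guaranteed not to cross 4GB, so adding the offset
	 * never carries into the high 32 bits of the physical address
	 */
	return region_pa + off;
}
#endif /* DHD_MSGBUF_DOC_SKETCH */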
5125 * dhd_prot_ring_init - Post the common ring information to dongle.
5127 * Used only for common rings.
5129 * The flowrings information is passed via the create flowring control message
5130 * (tx_flowring_create_request_t) sent over the H2D control submission common
5134 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5140 /* CAUTION: ring::base_addr already in Little Endian */
5141 dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
5142 sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
5143 dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
5144 sizeof(uint16), RING_MAX_ITEMS, ring->idx);
5145 dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
5146 sizeof(uint16), RING_ITEM_LEN, ring->idx);
5148 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
5149 sizeof(uint16), RING_WR_UPD, ring->idx);
5150 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
5151 sizeof(uint16), RING_RD_UPD, ring->idx);
5154 ring->inited = TRUE;
5156 } /* dhd_prot_ring_init */
5160 * dhd_prot_ring_reset - bzero a ring's DMA-able buffer and cache flush
5161 * Reset WR and RD indices to 0.
5164 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5166 DHD_TRACE(("%s\n", __FUNCTION__));
5168 dhd_dma_buf_reset(dhd, &ring->dma_buf);
5170 ring->rd = ring->wr = 0;
5176 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
5177 * hanging off the msgbuf_ring.
5180 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5182 dhd_prot_t *prot = dhd->prot;
5185 ring->inited = FALSE;
5186 /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
5188 #ifdef BCM_SECURE_DMA
5189 if (SECURE_DMA_ENAB(prot->osh)) {
5190 SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
5191 if (ring->dma_buf.secdma) {
5192 MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
5194 ring->dma_buf.secdma = NULL;
5196 #endif /* BCM_SECURE_DMA */
5198 /* If the DMA-able buffer was carved out of a pre-reserved contiguous
5199 * memory, then simply stop using it.
5201 if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) {
5202 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
5203 memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
5205 dhd_dma_buf_free(dhd, &ring->dma_buf);
5208 } /* dhd_prot_ring_detach */
5212 * +----------------------------------------------------------------------------
5215 * Unlike common rings, which are attached very early on (dhd_prot_attach),
5216 * flowrings are dynamically instantiated. Moreover, flowrings may require a
5217 * larger DMA-able buffer. To avoid issues with fragmented cache coherent
5218 * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
5219 * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
5221 * Each DMA-able buffer may be allocated independently, or may be carved out
5222 * of a single large contiguous region that is registered with the protocol
5223 * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
5224 * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
5226 * No flowring pool action is performed in dhd_prot_attach(), as the number
5227 * of h2d rings is not yet known.
5229 * In dhd_prot_init(), the dongle advertised number of h2d rings is used to
5230 * determine the number of flowrings required, and a pool of msgbuf_rings are
5231 * allocated and a DMA-able buffer (carved or allocated) is attached.
5232 * See: dhd_prot_flowrings_pool_attach()
5234 * A flowring msgbuf_ring object may be fetched from this pool during flowring
5235 * creation, using the flowid. Likewise, flowrings may be freed back into the
5236 * pool on flowring deletion.
5237 * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
5239 * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
5240 * are detached (returned back to the carved region or freed), and the pool of
5241 * msgbuf_ring and any objects allocated against it are freed.
5242 * See: dhd_prot_flowrings_pool_detach()
5244 * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
5245 * state as-if upon an attach. All DMA-able buffers are retained.
5246 * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
5247 * pool attach will notice that the pool persists and continue to use it. This
5248 * will avoid the case of a fragmented DMA-able region.
5250 * +----------------------------------------------------------------------------
5253 /* Fetch number of H2D flowrings given the total number of h2d rings */
5254 #define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \
5255 ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS)
5257 /* Conversion of a flowid to a flowring pool index */
5258 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
5259 ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
5261 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
5262 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
5263 (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid)
5265 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
5266 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \
5267 for ((flowid) = DHD_FLOWRING_START_FLOWID, \
5268 (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
5269 (flowid) < (prot)->h2d_rings_total; \
5270 (flowid)++, (ring)++)
5273 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
5275 * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
5276 * Dongle includes common rings when it advertises the number of H2D rings.
5277 * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
5278 * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
5280 * dhd_prot_ring_attach is invoked to perform the actual initialization and
5281 * attaching the DMA-able buffer.
5283 * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
5284 * initialized msgbuf_ring_t object.
5286 * returns BCME_OK=0 on success
5287 * returns non-zero negative error value on failure.
5290 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
5293 msgbuf_ring_t *ring;
5294 uint16 h2d_flowrings_total; /* exclude H2D common rings */
5295 dhd_prot_t *prot = dhd->prot;
5296 char ring_name[RING_NAME_MAX_LENGTH];
5298 if (prot->h2d_flowrings_pool != NULL) {
5299 return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
5302 ASSERT(prot->h2d_rings_total == 0);
5304 /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
5305 prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
5307 if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
5308 DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
5309 __FUNCTION__, prot->h2d_rings_total));
5313 /* Subtract number of H2D common rings, to determine number of flowrings */
5314 h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
5316 DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
5318 /* Allocate pool of msgbuf_ring_t objects for all flowrings */
5319 prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
5320 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
5322 if (prot->h2d_flowrings_pool == NULL) {
5323 DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
5324 __FUNCTION__, h2d_flowrings_total));
5328 /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
5329 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
5330 snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
5331 ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
5332 if (dhd_prot_ring_attach(dhd, ring, ring_name,
5333 H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
5334 DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
5342 dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
5345 prot->h2d_rings_total = 0;
5348 } /* dhd_prot_flowrings_pool_attach */
5352 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
5353 * Invokes dhd_prot_ring_reset to perform the actual reset.
5355 * The DMA-able buffer is not freed during reset and neither is the flowring
5358 * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
5359 * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
5360 * from a previous flowring pool instantiation will be reused.
5362 * This will avoid a fragmented DMA-able memory condition, if multiple
5363 * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
5367 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
5370 msgbuf_ring_t *ring;
5371 dhd_prot_t *prot = dhd->prot;
5373 if (prot->h2d_flowrings_pool == NULL) {
5374 ASSERT(prot->h2d_rings_total == 0);
5378 /* Reset each flowring in the flowring pool */
5379 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
5380 dhd_prot_ring_reset(dhd, ring);
5381 ring->inited = FALSE;
5384 /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
5389 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
5390 * DMA-able buffers for flowrings.
5391 * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
5392 * de-initialization of each msgbuf_ring_t.
5395 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
5398 msgbuf_ring_t *ring;
5399 int h2d_flowrings_total; /* exclude H2D common rings */
5400 dhd_prot_t *prot = dhd->prot;
5402 if (prot->h2d_flowrings_pool == NULL) {
5403 ASSERT(prot->h2d_rings_total == 0);
5407 /* Detach the DMA-able buffer for each flowring in the flowring pool */
5408 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
5409 dhd_prot_ring_detach(dhd, ring);
5412 h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
5414 MFREE(prot->osh, prot->h2d_flowrings_pool,
5415 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
5417 prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
5418 prot->h2d_rings_total = 0;
5420 } /* dhd_prot_flowrings_pool_detach */
5424 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
5425 * msgbuf_ring from the flowring pool, and assign it.
5427 * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
5428 * ring information to the dongle, a flowring's information is passed via a
5429 * flowring create control message.
5431 * Only the ring state (WR, RD) index are initialized.
5433 static msgbuf_ring_t *
5434 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
5436 msgbuf_ring_t *ring;
5437 dhd_prot_t *prot = dhd->prot;
5439 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
5440 ASSERT(flowid < prot->h2d_rings_total);
5441 ASSERT(prot->h2d_flowrings_pool != NULL);
5443 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
5445 /* ASSERT flow_ring->inited == FALSE */
5450 ring->inited = TRUE;
5457 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
5458 * msgbuf_ring back to the flow_ring pool.
5461 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
5463 msgbuf_ring_t *ring;
5464 dhd_prot_t *prot = dhd->prot;
5466 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
5467 ASSERT(flowid < prot->h2d_rings_total);
5468 ASSERT(prot->h2d_flowrings_pool != NULL);
5470 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
5472 ASSERT(ring == (msgbuf_ring_t*)flow_ring);
5473 /* ASSERT flow_ring->inited == TRUE */
5475 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
5479 ring->inited = FALSE;
5485 /* Assumes only one index is updated at a time */
5486 /* If exactly_nitems is true, this function will allocate space for nitems or fail, */
5487 /* except when a wrap-around is encountered (the last nitems of the ring buffer), to prevent a hang-up */
5488 /* If exactly_nitems is false, this function will allocate space for nitems or less */
5489 static void *BCMFASTPATH
5490 dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
5491 bool exactly_nitems)
5493 void *ret_ptr = NULL;
5494 uint16 ring_avail_cnt;
5496 ASSERT(nitems <= ring->max_items);
5498 ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
5500 if ((ring_avail_cnt == 0) ||
5501 (exactly_nitems && (ring_avail_cnt < nitems) &&
5502 ((ring->max_items - ring->wr) >= nitems))) {
5503 DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
5504 ring->name, nitems, ring->wr, ring->rd));
5507 *alloced = MIN(nitems, ring_avail_cnt);
5509 /* Return next available space */
5510 ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
5512 /* Update write index */
5513 if ((ring->wr + *alloced) == ring->max_items) {
5515 } else if ((ring->wr + *alloced) < ring->max_items) {
5516 ring->wr += *alloced;
5518 /* Should never hit this */
5524 } /* dhd_prot_get_ring_space */
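/*
 * Illustrative sketch, not driver code: write space in these rings is the
 * gap between WR and RD, conventionally keeping one slot empty so that
 * WR == RD always means "empty"; the WR index wraps to 0 exactly at
 * max_items, as the update above shows. The driver's CHECK_WRITE_SPACE
 * macro may differ in detail, so this is an assumed model:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static unsigned short example_write_space(unsigned short rd,
	unsigned short wr, unsigned short max_items)
{
	/* total free slots, leaving one empty to disambiguate full/empty */
	return (wr >= rd) ? (unsigned short)(max_items - wr + rd - 1)
	                  : (unsigned short)(rd - wr - 1);
}

static unsigned short example_advance_wr(unsigned short wr,
	unsigned short nitems, unsigned short max_items)
{
	wr += nitems;
	return (wr == max_items) ? 0 : wr; /* wrap exactly at the ring end */
}
#endif /* DHD_MSGBUF_DOC_SKETCH */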
5528 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
5529 * new messages in a H2D ring. The messages are flushed from cache prior to
5530 * posting the new WR index. The new WR index will be updated in the DMA index
5531 * array or directly in the dongle's ring state memory.
5532 * A PCIE doorbell will be generated to wake up the dongle.
5534 static void BCMFASTPATH
5535 dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
5538 dhd_prot_t *prot = dhd->prot;
5541 OSL_CACHE_FLUSH(p, ring->item_len * nitems);
5543 if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
5544 dhd_prot_dma_indx_set(dhd, ring->wr,
5545 H2D_DMA_INDX_WR_UPD, ring->idx);
5547 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
5548 sizeof(uint16), RING_WR_UPD, ring->idx);
5551 /* raise h2d interrupt */
5552 prot->mb_ring_fn(dhd->bus, ring->wr);
5557 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
5558 * from a D2H ring. The new RD index will be updated in the DMA Index array or
5559 * directly in dongle's ring state memory.
5562 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
5564 /* update read index */
5565 /* If DMA'ing of H2D indices is supported,
5566 * update the RD index in host memory;
5567 * otherwise update it in dongle TCM
5569 if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
5570 dhd_prot_dma_indx_set(dhd, ring->rd,
5571 D2H_DMA_INDX_RD_UPD, ring->idx);
5573 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
5574 sizeof(uint16), RING_RD_UPD, ring->idx);
5580 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
5581 * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
5582 * See dhd_prot_dma_indx_init()
5585 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
5589 dhd_prot_t *prot = dhd->prot;
5592 case H2D_DMA_INDX_WR_UPD:
5593 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
5594 offset = DHD_H2D_RING_OFFSET(ringid);
5597 case D2H_DMA_INDX_RD_UPD:
5598 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
5599 offset = DHD_D2H_RING_OFFSET(ringid);
5603 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
5608 ASSERT(prot->rw_index_sz != 0);
5609 ptr += offset * prot->rw_index_sz;
5611 *(uint16*)ptr = htol16(new_index);
5613 OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
5615 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
5616 __FUNCTION__, new_index, type, ringid, ptr, offset));
5618 } /* dhd_prot_dma_indx_set */
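/*
 * Illustrative sketch, not driver code: each ring's slot in the DMA index
 * array lives at (ring offset * rw_index_sz), and the host stores the index
 * in little-endian byte order before flushing the cache so the dongle's DMA
 * read sees it. Hypothetical, endian-explicit model of the addressing:
 */
#ifdef DHD_MSGBUF_DOC_SKETCH /* hypothetical guard; never defined in any build */
static void example_indx_store(unsigned char *array_base,
	unsigned int ring_offset, unsigned int rw_index_sz,
	unsigned short new_index)
{
	unsigned char *slot = array_base + (ring_offset * rw_index_sz);
	slot[0] = (unsigned char)(new_index & 0xFF);        /* LE low byte */
	slot[1] = (unsigned char)((new_index >> 8) & 0xFF); /* LE high byte */
	/* on a cached host a cache flush of 'slot' would follow here */
}
#endif /* DHD_MSGBUF_DOC_SKETCH */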
5622 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
5624 * Dongle DMAs an entire array to host memory (if the feature is enabled).
5625 * See dhd_prot_dma_indx_init()
5628 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
5633 dhd_prot_t *prot = dhd->prot;
5636 case H2D_DMA_INDX_WR_UPD:
5637 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
5638 offset = DHD_H2D_RING_OFFSET(ringid);
5641 case H2D_DMA_INDX_RD_UPD:
5642 ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
5643 offset = DHD_H2D_RING_OFFSET(ringid);
5646 case D2H_DMA_INDX_WR_UPD:
5647 ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
5648 offset = DHD_D2H_RING_OFFSET(ringid);
5651 case D2H_DMA_INDX_RD_UPD:
5652 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
5653 offset = DHD_D2H_RING_OFFSET(ringid);
5657 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
5662 ASSERT(prot->rw_index_sz != 0);
5663 ptr += offset * prot->rw_index_sz;
5665 OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
5667 data = LTOH16(*((uint16*)ptr));
5669 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
5670 __FUNCTION__, data, type, ringid, ptr, offset));
5674 } /* dhd_prot_dma_indx_get */
5677 * An array of DMA read/write indices, containing information about host rings, can be maintained
5678 * either in host memory or in device memory, depending on preprocessor options. Depending on these
5679 * options, this function is called during driver initialization. It reserves and initializes
5680 * blocks of DMA-able host memory containing an array of DMA read or DMA write indices. The physical
5681 * addresses of these host memory blocks are communicated to the dongle later on. By reading this host
5682 * memory, the dongle learns about the state of the host rings.
5686 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
5687 dhd_dma_buf_t *dma_buf, uint32 bufsz)
5691 if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
5694 rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
5700 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
5703 dhd_prot_t *prot = dhd->prot;
5704 dhd_dma_buf_t *dma_buf;
5707 DHD_ERROR(("prot is not inited\n"));
5711 /* Dongle advertizes 2B or 4B RW index size */
5712 ASSERT(rw_index_sz != 0);
5713 prot->rw_index_sz = rw_index_sz;
5715 bufsz = rw_index_sz * length;
5718 case H2D_DMA_INDX_WR_BUF:
5719 dma_buf = &prot->h2d_dma_indx_wr_buf;
5720 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5723 DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
5724 dma_buf->len, rw_index_sz, length));
5727 case H2D_DMA_INDX_RD_BUF:
5728 dma_buf = &prot->h2d_dma_indx_rd_buf;
5729 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5732 DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
5733 dma_buf->len, rw_index_sz, length));
5736 case D2H_DMA_INDX_WR_BUF:
5737 dma_buf = &prot->d2h_dma_indx_wr_buf;
5738 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5741 DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
5742 dma_buf->len, rw_index_sz, length));
5745 case D2H_DMA_INDX_RD_BUF:
5746 dma_buf = &prot->d2h_dma_indx_rd_buf;
5747 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
5750 DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
5751 dma_buf->len, rw_index_sz, length));
5755 DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
5756 return BCME_BADOPTION;
5762 DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
5763 __FUNCTION__, type, bufsz));
5766 } /* dhd_prot_dma_indx_init */
5770 * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
5771 * from, or NULL if there are no more messages to read.
5774 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
5780 void *read_addr = NULL; /* address of next msg to be read in ring */
5783 DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
5784 __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
5785 (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
5787 /* Remember the read index in a variable.
5788 * This is because ring->rd gets updated at the end of this function,
5789 * so the exact read index from which the
5790 * message was read would otherwise not be available.
5792 ring->curr_rd = ring->rd;
5794 /* update write pointer */
5795 if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
5796 /* DMAing write/read indices supported */
5797 d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
5800 dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
5805 depth = ring->max_items;
5807 /* check for avail space, in number of ring items */
5808 items = READ_AVAIL_SPACE(wr, rd, depth);
5813 ASSERT(items < ring->max_items);
5816 * Note that there are builds where Assert translates to just printk,
5817 * so even if we hit this condition we would never halt. Now
5818 * dhd_prot_process_msgtype can get into a big loop if this happens.
5821 if (items >= ring->max_items) {
5822 DHD_ERROR(("\r\n======================= \r\n"));
5823 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
5824 __FUNCTION__, ring, ring->name, ring->max_items, items));
5825 DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
5826 DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n",
5827 dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack));
5828 DHD_ERROR(("\r\n======================= \r\n"));
5834 /* if space is available, calculate address to be read */
5835 read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
5837 /* update read pointer */
5838 if ((ring->rd + items) >= ring->max_items) {
5844 ASSERT(ring->rd < ring->max_items);
5846 /* convert items to bytes: available_len must be 32 bits */
5847 *available_len = (uint32)(items * ring->item_len);
5849 OSL_CACHE_INV(read_addr, *available_len);
5851 /* return read address */
5854 } /* dhd_prot_get_read_addr */
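/*
 * Illustrative sketch (not compiled): the equivalent of the contiguous-read
 * computation READ_AVAIL_SPACE() performs above. Only items up to the end of
 * the ring are returned in one pass; once ring->rd wraps to 0, the next call
 * picks up the remainder. Worked example, assuming depth = 256:
 *   wr = 10, rd = 250  ->  items = 256 - 250 = 6   (read to the ring end)
 *   rd wraps to 0      ->  items = 10 - 0    = 10  (read the remainder)
 */
#if 0 /* illustrative sketch only */
static uint16
sketch_read_avail(uint16 wr, uint16 rd, uint16 depth)
{
	return (wr >= rd) ? (wr - rd) : (depth - rd);
}
#endif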
5856 /** Creates a flow ring and informs dongle of this event */
5858 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
5860 tx_flowring_create_request_t *flow_create_rqst;
5861 msgbuf_ring_t *flow_ring;
5862 dhd_prot_t *prot = dhd->prot;
5863 unsigned long flags;
5865 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
5867 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
5868 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
5869 if (flow_ring == NULL) {
5870 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
5871 __FUNCTION__, flow_ring_node->flowid));
5875 DHD_GENERAL_LOCK(dhd, flags);
5877 /* Request for ctrl_ring buffer space */
5878 flow_create_rqst = (tx_flowring_create_request_t *)
5879 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
5881 if (flow_create_rqst == NULL) {
5882 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
5883 DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
5884 __FUNCTION__, flow_ring_node->flowid));
5885 DHD_GENERAL_UNLOCK(dhd, flags);
5889 flow_ring_node->prot_info = (void *)flow_ring;
5891 /* Common msg buf hdr */
5892 flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
5893 flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
5894 flow_create_rqst->msg.request_id = htol32(0); /* TBD */
5896 flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
5897 ctrl_ring->seqnum++;
5899 /* Update flow create message */
5900 flow_create_rqst->tid = flow_ring_node->flow_info.tid;
5901 flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
5902 memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
5903 memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
5904 /* CAUTION: ring::base_addr already in Little Endian */
5905 flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
5906 flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
5907 flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
5908 flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
5909 DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
5910 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
5911 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
5912 flow_ring_node->flow_info.ifindex));
5914 /* Update the flow_ring's WRITE index */
5915 if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
5916 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
5917 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
5919 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
5920 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
5923 /* update control subn ring's WR index and ring doorbell to dongle */
5924 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
5926 DHD_GENERAL_UNLOCK(dhd, flags);
5929 } /* dhd_prot_flow_ring_create */
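/*
 * Illustrative sketch (not compiled): the flow ring create request above, and
 * the delete and flush requests below, all follow the same H2D control ring
 * submission pattern. The helper below is hypothetical and only outlines that
 * pattern, assuming the common message header layout (cmn_msg_hdr_t) from
 * bcmmsgbuf.h; each real request additionally fills its type-specific fields.
 */
#if 0 /* illustrative sketch only */
static int
sketch_h2d_ctrl_submit(dhd_pub_t *dhd, uint8 msg_type)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
	cmn_msg_hdr_t *hdr;
	uint16 alloced = 0;
	unsigned long flags;

	DHD_GENERAL_LOCK(dhd, flags);

	/* claim one work item in the control submission ring */
	hdr = (cmn_msg_hdr_t *)dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (hdr == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return BCME_NORESOURCE;
	}

	/* common message header: type and per-ring epoch sequence number */
	hdr->msg_type = msg_type;
	hdr->epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	/* ... type-specific fields are filled here by the real requests ... */

	/* update the ring's WR index and ring the doorbell to the dongle */
	dhd_prot_ring_write_complete(dhd, ring, hdr, 1);

	DHD_GENERAL_UNLOCK(dhd, flags);
	return BCME_OK;
}
#endif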
5931 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
5933 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
5935 tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
5937 DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
5938 ltoh16(flow_create_resp->cmplt.status),
5939 ltoh16(flow_create_resp->cmplt.flow_ring_id)));
5941 dhd_bus_flow_ring_create_response(dhd->bus,
5942 ltoh16(flow_create_resp->cmplt.flow_ring_id),
5943 ltoh16(flow_create_resp->cmplt.status));
5946 /** Called when e.g. a flow ring is deleted */
5947 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
5949 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
5950 dhd_prot_ring_detach(dhd, flow_ring);
5951 DHD_INFO(("%s: Cleaning up flow ring\n", __FUNCTION__));
5954 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
5955 struct bcmstrbuf *strbuf, const char * fmt)
5957 const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n";
5958 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
5960 uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
5965 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
5966 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
5967 bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
5968 ltoh32(flow_ring->base_addr.high_addr),
5969 ltoh32(flow_ring->base_addr.low_addr), dma_buf_len);
5972 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
5974 dhd_prot_t *prot = dhd->prot;
5975 bcm_bprintf(strbuf, "CtrlPost: ");
5976 dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL);
5977 bcm_bprintf(strbuf, "CtrlCpl: ");
5978 dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL);
5980 bcm_bprintf(strbuf, "RxPost: ");
5981 bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost);
5982 dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL);
5983 bcm_bprintf(strbuf, "RxCpl: ");
5984 dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL);
5986 bcm_bprintf(strbuf, "TxCpl: ");
5987 dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL);
5988 bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n",
5989 dhd->prot->active_tx_count,
5990 DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle));
5994 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
5996 tx_flowring_delete_request_t *flow_delete_rqst;
5997 dhd_prot_t *prot = dhd->prot;
5998 unsigned long flags;
6000 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6002 DHD_GENERAL_LOCK(dhd, flags);
6004 /* Request for ring buffer space */
6005 flow_delete_rqst = (tx_flowring_delete_request_t *)
6006 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6008 if (flow_delete_rqst == NULL) {
6009 DHD_GENERAL_UNLOCK(dhd, flags);
6010 DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
6014 /* Common msg buf hdr */
6015 flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
6016 flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
6017 flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
6019 flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6022 /* Update Delete info */
6023 flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
6024 flow_delete_rqst->reason = htol16(BCME_OK);
6026 DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
6027 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
6028 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
6029 flow_ring_node->flow_info.ifindex));
6031 /* update ring's WR index and ring doorbell to dongle */
6032 dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
6033 DHD_GENERAL_UNLOCK(dhd, flags);
6039 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
6041 tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
6043 DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
6044 ltoh16(flow_delete_resp->cmplt.status), ltoh16(flow_delete_resp->cmplt.flow_ring_id)));
6046 dhd_bus_flow_ring_delete_response(dhd->bus, ltoh16(flow_delete_resp->cmplt.flow_ring_id),
6047 ltoh16(flow_delete_resp->cmplt.status));
6051 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
6053 tx_flowring_flush_request_t *flow_flush_rqst;
6054 dhd_prot_t *prot = dhd->prot;
6055 unsigned long flags;
6057 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6059 DHD_GENERAL_LOCK(dhd, flags);
6061 /* Request for ring buffer space */
6062 flow_flush_rqst = (tx_flowring_flush_request_t *)
6063 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6064 if (flow_flush_rqst == NULL) {
6065 DHD_GENERAL_UNLOCK(dhd, flags);
6066 DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
6070 /* Common msg buf hdr */
6071 flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
6072 flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
6073 flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
6075 flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6078 flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
6079 flow_flush_rqst->reason = htol16(BCME_OK);
6081 DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
6083 /* update ring's WR index and ring doorbell to dongle */
6084 dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
6085 DHD_GENERAL_UNLOCK(dhd, flags);
6088 } /* dhd_prot_flow_ring_flush */
6091 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
6093 tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
6095 DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
6096 ltoh16(flow_flush_resp->cmplt.status)));
6098 dhd_bus_flow_ring_flush_response(dhd->bus, ltoh16(flow_flush_resp->cmplt.flow_ring_id),
6099 ltoh16(flow_flush_resp->cmplt.status));
6103 * Request dongle to configure soft doorbells for D2H rings. Host-populated soft
6104 * doorbell information is transferred to the dongle via the d2h ring config control message.
6108 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
6110 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
6115 unsigned long flags;
6116 dhd_prot_t *prot = dhd->prot;
6117 ring_config_req_t *ring_config_req;
6118 bcmpcie_soft_doorbell_t *soft_doorbell;
6119 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
6120 const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
6122 /* Claim space for d2h_rings number of d2h_ring_config_req_t messages */
6123 DHD_GENERAL_LOCK(dhd, flags);
6124 msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
6126 if (msg_start == NULL) {
6127 DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
6128 __FUNCTION__, d2h_rings));
6129 DHD_GENERAL_UNLOCK(dhd, flags);
6133 msg_next = (uint8*)msg_start;
6135 for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
6137 /* position the ring_config_req into the ctrl subm ring */
6138 ring_config_req = (ring_config_req_t *)msg_next;
6140 /* Common msg header */
6141 ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
6142 ring_config_req->msg.if_id = 0;
6143 ring_config_req->msg.flags = 0;
6145 ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
6146 ctrl_ring->seqnum++;
6148 ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
6150 /* Ring Config subtype and d2h ring_id */
6151 ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
6152 ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
6154 /* Host soft doorbell configuration */
6155 soft_doorbell = &prot->soft_doorbell[ring_idx];
6157 ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
6158 ring_config_req->soft_doorbell.haddr.high =
6159 htol32(soft_doorbell->haddr.high);
6160 ring_config_req->soft_doorbell.haddr.low =
6161 htol32(soft_doorbell->haddr.low);
6162 ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
6163 ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
6165 DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
6166 __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
6167 ring_config_req->soft_doorbell.haddr.low,
6168 ring_config_req->soft_doorbell.value));
6170 msg_next = msg_next + ctrl_ring->item_len;
6173 /* update control subn ring's WR index and ring doorbell to dongle */
6174 dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
6175 DHD_GENERAL_UNLOCK(dhd, flags);
6176 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
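/*
 * Illustrative sketch (not compiled): how a host platform might populate
 * prot->soft_doorbell[] before the request above is sent. The register
 * address and wakeup value below are invented placeholders; a real platform
 * would supply its network processor's thread wakeup register and the value
 * identifying the target core/thread context.
 */
#if 0 /* illustrative sketch only */
static void
sketch_setup_soft_doorbells(dhd_pub_t *dhd)
{
	uint16 ring_idx;
	dhd_prot_t *prot = dhd->prot;

	for (ring_idx = 0; ring_idx < BCMPCIE_D2H_COMMON_MSGRINGS; ring_idx++) {
		bcmpcie_soft_doorbell_t *db = &prot->soft_doorbell[ring_idx];

		db->haddr.high = 0;               /* hypothetical: 32bit register space */
		db->haddr.low  = 0xDEAD1000;      /* hypothetical wakeup register address */
		db->value      = 0x80 + ring_idx; /* hypothetical core/thread token */
		db->items      = 8;               /* interrupt coalescing: item count */
		db->msecs      = 4;               /* interrupt coalescing: timeout */
	}
}
#endif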
6180 dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg)
6182 DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
6183 __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
6184 ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
6188 dhd_prot_debug_info_print(dhd_pub_t *dhd)
6190 dhd_prot_t *prot = dhd->prot;
6191 msgbuf_ring_t *ring;
6193 uint32 intstatus = 0;
6195 uint32 mbintstatus = 0;
6196 uint32 d2h_mb_data = 0;
6199 DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
6201 ring = &prot->h2dring_ctrl_subn;
6202 dma_buf_len = ring->max_items * ring->item_len;
6203 DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
6204 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6205 ltoh32(ring->base_addr.low_addr), dma_buf_len));
6206 DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
6207 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
6208 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
6209 DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
6211 ring = &prot->d2hring_ctrl_cpln;
6212 dma_buf_len = ring->max_items * ring->item_len;
6213 DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
6214 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6215 ltoh32(ring->base_addr.low_addr), dma_buf_len));
6216 DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
6217 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
6218 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
6219 DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
6220 DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum));
6222 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6223 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
6224 mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
6225 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
6227 DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
6228 DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
6229 intstatus, intmask, mbintstatus));
6230 DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask));
6236 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
6241 uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
6243 OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
6244 dhd->prot->d2h_dma_indx_wr_buf.len);
6246 ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
6248 bcm_bprintf(b, "\nmax_h2d_queues %d\n", max_h2d_queues);
6250 bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%p\n", ptr);
6251 value = ltoh32(*ptr);
6252 bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
6254 value = ltoh32(*ptr);
6255 bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
6258 bcm_bprintf(b, "RPTR block Flow rings, 0x%p\n", ptr);
6259 for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
6260 value = ltoh32(*ptr);
6261 bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
6265 OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
6266 dhd->prot->h2d_dma_indx_rd_buf.len);
6268 ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
6270 bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%p\n", ptr);
6271 value = ltoh32(*ptr);
6272 bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
6274 value = ltoh32(*ptr);
6275 bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
6277 value = ltoh32(*ptr);
6278 bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
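/*
 * Illustrative layout (a sketch inferred from the walk above) of an index
 * block as dumped by this function, assuming 4-byte (uint32) entries: the
 * common rings come first, followed by one entry per flow ring.
 *
 *   word 0                     : first common ring index
 *   word 1                     : second common ring index
 *   word 2 .. max_h2d_queues-1 : per-flowring indices
 *
 * The helper name below is hypothetical.
 */
#if 0 /* illustrative sketch only */
static uint32
sketch_indx_block_read(uint32 *block_va, uint32 entry)
{
	/* entries are stored little endian, one uint32 per ring */
	return ltoh32(block_va[entry]);
}
#endif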
6284 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
6286 dhd_prot_t *prot = dhd->prot;
6287 #if DHD_DBG_SHOW_METADATA
6288 prot->metadata_dbg = val;
6290 return (uint32)prot->metadata_dbg;
6294 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
6296 dhd_prot_t *prot = dhd->prot;
6297 return (uint32)prot->metadata_dbg;
6301 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
6303 dhd_prot_t *prot = dhd->prot;
6305 prot->rx_metadata_offset = (uint16)val;
6307 prot->tx_metadata_offset = (uint16)val;
6308 return dhd_prot_metadatalen_get(dhd, rx);
6312 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
6314 dhd_prot_t *prot = dhd->prot;
6316 return prot->rx_metadata_offset;
6318 return prot->tx_metadata_offset;
6321 /** optimization to write "n" tx items at a time to ring */
6323 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
6325 dhd_prot_t *prot = dhd->prot;
6327 prot->txp_threshold = (uint16)val;
6328 val = prot->txp_threshold;
6332 #ifdef DHD_RX_CHAINING
6334 static INLINE void BCMFASTPATH
6335 dhd_rxchain_reset(rxchain_info_t *rxchain)
6337 rxchain->pkt_count = 0;
6340 static void BCMFASTPATH
6341 dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
6345 dhd_prot_t *prot = dhd->prot;
6346 rxchain_info_t *rxchain = &prot->rxchain;
6348 ASSERT(!PKTISCHAINED(pkt));
6349 ASSERT(PKTCLINK(pkt) == NULL);
6350 ASSERT(PKTCGETATTR(pkt) == 0);
6352 eh = PKTDATA(dhd->osh, pkt);
6353 prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
6355 if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
6356 rxchain->h_da, rxchain->h_prio))) {
6357 /* Different flow - First release the existing chain */
6358 dhd_rxchain_commit(dhd);
6361 /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
6362 /* so that the chain can be handed off to CTF bridge as is. */
6363 if (rxchain->pkt_count == 0) {
6364 /* First packet in chain */
6365 rxchain->pkthead = rxchain->pkttail = pkt;
6367 /* Keep a copy of ptr to ether_da, ether_sa and prio */
6368 rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
6369 rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
6370 rxchain->h_prio = prio;
6371 rxchain->ifidx = ifidx;
6372 rxchain->pkt_count++;
6374 /* Same flow - keep chaining */
6375 PKTSETCLINK(rxchain->pkttail, pkt);
6376 rxchain->pkttail = pkt;
6377 rxchain->pkt_count++;
6380 if ((!ETHER_ISMULTI(rxchain->h_da)) &&
6381 ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
6382 (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
6383 PKTSETCHAINED(dhd->osh, pkt);
6384 PKTCINCRCNT(rxchain->pkthead);
6385 PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
6387 dhd_rxchain_commit(dhd);
6391 /* If we have hit the max chain length, dispatch the chain and reset */
6392 if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
6393 dhd_rxchain_commit(dhd);
6397 static void BCMFASTPATH
6398 dhd_rxchain_commit(dhd_pub_t *dhd)
6400 dhd_prot_t *prot = dhd->prot;
6401 rxchain_info_t *rxchain = &prot->rxchain;
6403 if (rxchain->pkt_count == 0)
6406 /* Release the packets to dhd_linux */
6407 dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
6409 /* Reset the chain */
6410 dhd_rxchain_reset(rxchain);
6413 #endif /* DHD_RX_CHAINING */
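/*
 * Illustrative sketch (not compiled): the chaining bookkeeping performed by
 * dhd_rxchain_frame() above, reduced to its essentials. Packets of the same
 * flow are linked into a singly linked chain via PKTSETCLINK() and handed off
 * in one batch; the helper below is hypothetical.
 */
#if 0 /* illustrative sketch only */
static void
sketch_chain_append(dhd_pub_t *dhd, rxchain_info_t *rxchain, void *pkt)
{
	if (rxchain->pkt_count == 0) {
		/* first packet starts a new chain */
		rxchain->pkthead = rxchain->pkttail = pkt;
	} else {
		/* append to the tail and advance it */
		PKTSETCLINK(rxchain->pkttail, pkt);
		rxchain->pkttail = pkt;
	}
	rxchain->pkt_count++;

	/* dispatch once the chain reaches the maximum length */
	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN)
		dhd_rxchain_commit(dhd);
}
#endif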