/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
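/*
 * For orientation, the descriptor format shared by both rings is a small
 * little-endian record declared in ce.h; the sketch below is annotated for
 * reference only, not a redefinition:
 *
 *	struct ce_desc {
 *		__le32 addr;	- physical address of the buffer
 *		__le16 nbytes;	- transfer length in bytes
 *		__le16 flags;	- CE_DESC_FLAGS_GATHER, CE_DESC_FLAGS_BYTE_SWAP
 *				  and the transfer-id meta-data field
 *	};
 */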
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar,
					   ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}
static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr   = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags  = __cpu_to_le16(desc_flags);

	/* Publish the shadow descriptor to the DMA-visible ring in one go */
	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND: defer the hardware kick until the last fragment of a
	 * gather sequence has been queued.
	 */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before the index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
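/*
 * An illustrative call from a HIF send path (variable names here are
 * hypothetical; the real callers live in pci.c):
 *
 *	ret = ath10k_ce_send(ce_pipe, skb, paddr, skb->len, transfer_id, 0);
 *	if (ret)
 *		... caller unmaps the buffer and requeues ...
 *
 * Passing CE_SEND_FLAG_GATHER on all but the last fragment lets hardware
 * coalesce a multi-buffer send; only the final, flagless fragment bumps
 * the hardware write index.
 */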
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ar_pci->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
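/*
 * CE_RING_DELTA(mask, from, to) (see ce.h) counts ring slots from 'from'
 * to 'to' modulo the power-of-two ring size, effectively
 * ((int)(to) - (int)(from)) & mask. Worked example: with 8 entries
 * (mask 0x7), write_index 6 and sw_index 2, the free-slot count is
 * CE_RING_DELTA(0x7, 6, 2 - 1) = (1 - 6) & 0x7 = 3.
 */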
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 u32 *bufferp,
					 unsigned int *nbytesp,
					 unsigned int *transfer_idp,
					 unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	struct ath10k *ar = ce_state->ar;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* sanity */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 u32 *bufferp,
					 unsigned int *nbytesp,
					 unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
/*
 * Guts of the interrupt handler for per-engine interrupts on a
 * particular CE.
 *
 * Invokes the registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but they still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
}
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}
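/*
 * intr_summary above is a bitmap with bit N set when CE N has an interrupt
 * pending: e.g. a summary of 0x05 services CE 0 and CE 2 and skips the
 * rest, and the loop exits early once every pending bit has been consumed.
 */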
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if (!disable_copy_compl_intr &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}
void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;

	/* Skip the last copy engine, CE7, the diagnostic window: it uses
	 * polling and isn't initialized for interrupts.
	 */
	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
		ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		dma_free_coherent(ar->dev,
				  (nentries * sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  src_ring->base_addr_owner_space,
				  src_ring->base_addr_ce_space);
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Zero the descriptor memory so no stale data can crash the
	 * system while firmware is being downloaded.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	int ret;

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}
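/*
 * An illustrative attribute set for one pipe (the real per-CE tables live
 * in pci.c; the values below are representative, not normative):
 *
 *	static const struct ce_attr attr = {
 *		.flags = CE_ATTR_FLAGS,
 *		.src_nentries = 16,	- host->target ring, 16 entries
 *		.src_sz_max = 256,	- largest source buffer in bytes
 *		.dest_nentries = 0,	- no target->host ring on this pipe
 *	};
 */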
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr,
			 void (*send_cb)(struct ath10k_ce_pipe *),
			 void (*recv_cb)(struct ath10k_ce_pipe *))
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there are enough CE ringbuffer entries for HTT TX to
	 * avoid additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}