/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "targaddrs.h"
enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
#define QCA988X_2_0_DEVICE_ID	(0x003c)
#define QCA6174_2_1_DEVICE_ID	(0x003e)
#define QCA99X0_2_0_DEVICE_ID	(0x0040)
static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{0}
};
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{ .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 256 },

	/* CE1: target->host HTT + HTC control */
	{ .flags = CE_ATTR_FLAGS, .src_sz_max = 2048, .dest_nentries = 512 },

	/* CE2: target->host WMI */
	{ .flags = CE_ATTR_FLAGS, .src_sz_max = 2048, .dest_nentries = 128 },

	/* CE3: host->target WMI */
	{ .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048 },

	/* CE4: host->target HTT */
	{ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	  .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, .src_sz_max = 256 },

	/* CE5: unused */
	{ .flags = CE_ATTR_FLAGS },

	/* CE6: target autonomous hif_memcpy */
	{ .flags = CE_ATTR_FLAGS },

	/* CE7: ce_diag, the Diagnostic Window */
	{ .flags = CE_ATTR_FLAGS, .src_nentries = 2,
	  .src_sz_max = DIAG_TRANSFER_LIMIT, .dest_nentries = 2 },
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
};
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(3), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(2), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(3), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(2), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(3), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(2), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(3), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(2), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(3), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(2), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(0), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(1), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(0), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(1), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
	  __cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
	  __cpu_to_le32(4), },
	{ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
	  __cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
	  __cpu_to_le32(1), },

	/* (Additions here) */
};
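
/*
 * Each entry above is a { service id, pipe direction, pipe number }
 * triplet; ath10k_pci_hif_map_service_to_pipe() walks this table to
 * resolve the upload (host->target) and download (target->host) pipe
 * for a given HTC service.
 */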
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar))
			return 0;

		udelay(curr_delay);
		tot_delay += curr_delay;
	}

	return -ETIMEDOUT;
}
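
/*
 * Power save works as a wake reference count protected by ps_lock:
 * ath10k_pci_wake() asserts PCIE_SOC_WAKE and spin-waits for the RTC
 * state to report V_ON on the first reference, ath10k_pci_sleep() drops
 * a reference and arms a grace-period timer that lets the chip go back
 * to sleep only once the count has stayed at zero, so frequent register
 * accesses don't bounce the SOC_WAKE line.
 */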
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
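
/*
 * The MMIO accessors below bounds-check the offset against the mapped
 * BAR length and hold a wake reference for the duration of the access,
 * relying on the power save logic above to let the chip sleep again.
 */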
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * cleared.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs > 1)
		return "msi-x";

	if (ar_pci->num_msi_intrs == 1)
		return "msi";

	return "legacy";
}
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	lockdep_assert_held(&ar_pci->ce_lock);

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	if (ret) {
		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	lockdep_assert_held(&ar_pci->ce_lock);

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	while (num--) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
	}
}
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	__ath10k_pci_rx_post_pipe(pipe);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	spin_lock_bh(&ar_pci->ce_lock);
	for (i = 0; i < CE_COUNT; i++)
		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space.
		 */
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 * Target CPU virtual address space
	 * to CE address space.
	 */
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff_head list;
	struct sk_buff *skb;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
					     &nbytes, &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		cb->tx_completion(ar, skb);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		cb->rx_completion(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}
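
/*
 * Scatter-gather send: every item except the last is posted with
 * CE_SEND_FLAG_GATHER so the copy engine treats them as fragments of a
 * single transfer; the final item is sent without the gather flag to
 * close the chain. On failure the already-posted descriptors are
 * reverted before the CE lock is released.
 */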
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items - 1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);

	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}
static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				    size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
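
/*
 * Reads the firmware's register dump area out of the host interest
 * region via the diagnostic window, logs it in rows of four words and,
 * when a crash data buffer is available, preserves the values for the
 * debugfs firmware crash dump.
 */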
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	del_timer_sync(&ar_pci->rx_post_retry);
}
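
/*
 * Resolves an HTC service id to its upload and download CE pipes using
 * target_service_to_ce_map_wlan. Only the upload side can end up polled
 * (a pipe flagged CE_ATTR_DIS_INTR, i.e. the HTT tx pipe); polling for
 * received messages is not supported.
 */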
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
}
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
	val &= ~CORE_CTRL_PCIE_REG_31_MASK;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
	val |= CORE_CTRL_PCIE_REG_31_MASK;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct ce_desc *ce_desc;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	ce_desc = ce_ring->shadow_base;
	if (WARN_ON(!ce_desc))
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ar_pci->msg_callbacks_current.tx_completion(ar, skb);
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless of how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt, reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_warm_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
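
/*
 * BMI (Boot Messaging Interface) exchange: the request is copied into a
 * DMA-safe bounce buffer, an optional response buffer is posted on the
 * receive pipe, and ath10k_pci_bmi_wait() then polls both pipes until
 * the send completes (and, if requested, a response arrives) or the BMI
 * timeout expires.
 */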
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}
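
/*
 * The bank count returned here feeds the hi_early_alloc host interest
 * value in ath10k_pci_init_config(); QCA6174 revisions differ in how
 * many IRAM banks must be reserved for the firmware.
 */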
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
		return 1;
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}
static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ar_pci->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
					   ath10k_pci_ce_send_done,
					   ath10k_pci_ce_recv_data);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_COUNT - 1) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}

static void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}
static int ath10k_pci_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}
/* This function effectively clears the target memory controller's assert
 * line.
 */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}

static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}

static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse the pci-e controller
	 * to the point of bringing the host system to a complete stop (i.e.
	 * hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
	if (QCA_REV_988X(ar))
		return ath10k_pci_qca988x_chip_reset(ar);
	else if (QCA_REV_6174(ar))
		return ath10k_pci_qca6174_chip_reset(ar);
	else
		return -ENOTSUPP;
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}

#ifdef CONFIG_PM

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return 0;
}
#endif
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.diag_read		= ath10k_pci_hif_diag_read,
	.diag_write		= ath10k_pci_diag_write_mem,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.read32			= ath10k_pci_read32,
	.write32		= ath10k_pci_write32,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
};
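
/*
 * Interrupt handling is split between the hard irq handlers, which only
 * acknowledge (and, for legacy interrupts, mask) the source, and the
 * tasklets below, which perform the actual copy engine servicing and
 * firmware crash handling.
 */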
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	if (!ath10k_pci_has_fw_crashed(ar)) {
		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
		return;
	}

	ath10k_pci_irq_disable(ar);
	ath10k_pci_fw_crashed_clear(ar);
	ath10k_pci_fw_crashed_dump(ar);
}
/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}
static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);

	return IRQ_HANDLED;
}
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn(ar, "unknown irq configuration upon request\n");
	return -EINVAL;
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR, MSI or MSI-X.
	 */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}
static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}
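
/*
 * Interrupt setup tries the modes in order: an MSI range with one vector
 * for firmware indications plus one per copy engine (MSI_NUM_REQUEST),
 * then a single MSI, and finally shared legacy interrupts, honouring the
 * irq_mode module parameter when it restricts the choice.
 */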
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	ar_pci->num_msi_intrs = 0;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		ath10k_pci_deinit_irq_legacy(ar);
		return 0;
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn(ar, "unknown irq configuration upon deinit\n");
	return -EINVAL;
}

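/* Polls FW_INDICATOR_ADDRESS for up to ATH10K_PCI_TARGET_WAIT ms. Reading
 * back 0xffffffff means the device has dropped off the bus, a pending
 * firmware event means it crashed during boot, and FW_IND_INITIALIZED is
 * the success condition. */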
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}

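/* Cold reset pulls the whole target, PCIe core included, through
 * SOC_GLOBAL_RESET and polls RTC_STATE until the reset is seen to latch
 * and then clear again. */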
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	int i;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;

		msleep(1);
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}

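/* Standard PCI bring-up: enable the device, claim BAR_NUM, force 32-bit
 * DMA masks (the target cannot address anything wider), enable bus
 * mastering and iomap the register space, unwinding in reverse order on
 * failure. */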
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}

static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

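/* The PCI device id alone is not enough to tell whether a chip is
 * supported; probe reads the chip id register and matches the revision
 * against ath10k_pci_supp_chips[] to reject unsupported silicon early. */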
static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}

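/* Probe order matters: the BAR must be claimed and interrupts wired up
 * before the chip reset, since the reset and the chip id read that follows
 * both depend on working register access. Every failure path unwinds via
 * the goto ladder at the bottom. */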
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		break;
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;

	if (pdev->subsystem_vendor || pdev->subsystem_device)
		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
			  "%04x:%04x:%04x:%04x",
			  pdev->vendor, pdev->device,
			  pdev->subsystem_vendor, pdev->subsystem_device);

	spin_lock_init(&ar_pci->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);
	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
		    (unsigned long)ar);

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_free_pipes;
	}

	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV; /* don't return 0 from a failed probe */
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

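/* Example (hypothetical shell session): the module parameters declared at
 * the top of this file can be given at load time, e.g.
 *
 *   modprobe ath10k_pci irq_mode=1 reset_mode=0
 *
 * which forces legacy INTx interrupts on the next probe while keeping
 * automatic reset selection. */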
static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X and QCA6174 PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);