/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"
/* How the driver claims its interrupt line; selectable via module param. */
enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,	/* pick MSI if available, else legacy */
	ATH10K_PCI_IRQ_LEGACY = 1,	/* force shared legacy INTx */
	ATH10K_PCI_IRQ_MSI = 2,		/* force MSI */
};
/* How the chip is reset; selectable via module param. */
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,	/* warm reset first, cold as fallback */
	ATH10K_PCI_RESET_WARM_ONLY = 1,	/* never attempt a cold reset */
};
47 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
48 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
50 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
51 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
53 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
54 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

/* PCI device IDs of the supported chips */
#define QCA988X_2_0_DEVICE_ID	(0x003c)
#define QCA6174_2_1_DEVICE_ID	(0x003e)
#define QCA99X0_2_0_DEVICE_ID	(0x0040)
64 static const struct pci_device_id ath10k_pci_id_table[] = {
65 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
66 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
70 static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
71 /* QCA988X pre 2.0 chips are not supported because they need some nasty
72 * hacks. ath10k doesn't have them and these devices crash horribly
75 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
76 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
77 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
78 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
79 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
80 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
/* Forward declarations for routines defined later in this file. */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
96 static const struct ce_attr host_ce_config_wlan[] = {
97 /* CE0: host->target HTC control and raw streams */
99 .flags = CE_ATTR_FLAGS,
105 /* CE1: target->host HTT + HTC control */
107 .flags = CE_ATTR_FLAGS,
110 .dest_nentries = 512,
113 /* CE2: target->host WMI */
115 .flags = CE_ATTR_FLAGS,
118 .dest_nentries = 128,
121 /* CE3: host->target WMI */
123 .flags = CE_ATTR_FLAGS,
129 /* CE4: host->target HTT */
131 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
132 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
139 .flags = CE_ATTR_FLAGS,
145 /* CE6: target autonomous hif_memcpy */
147 .flags = CE_ATTR_FLAGS,
153 /* CE7: ce_diag, the Diagnostic Window */
155 .flags = CE_ATTR_FLAGS,
157 .src_sz_max = DIAG_TRANSFER_LIMIT,
161 /* CE8: target->host pktlog */
163 .flags = CE_ATTR_FLAGS,
166 .dest_nentries = 128,
169 /* CE9 target autonomous qcache memcpy */
171 .flags = CE_ATTR_FLAGS,
177 /* CE10: target autonomous hif memcpy */
179 .flags = CE_ATTR_FLAGS,
185 /* CE11: target autonomous hif memcpy */
187 .flags = CE_ATTR_FLAGS,
194 /* Target firmware's Copy Engine configuration. */
195 static const struct ce_pipe_config target_ce_config_wlan[] = {
196 /* CE0: host->target HTC control and raw streams */
198 .pipenum = __cpu_to_le32(0),
199 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
200 .nentries = __cpu_to_le32(32),
201 .nbytes_max = __cpu_to_le32(256),
202 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
203 .reserved = __cpu_to_le32(0),
206 /* CE1: target->host HTT + HTC control */
208 .pipenum = __cpu_to_le32(1),
209 .pipedir = __cpu_to_le32(PIPEDIR_IN),
210 .nentries = __cpu_to_le32(32),
211 .nbytes_max = __cpu_to_le32(2048),
212 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
213 .reserved = __cpu_to_le32(0),
216 /* CE2: target->host WMI */
218 .pipenum = __cpu_to_le32(2),
219 .pipedir = __cpu_to_le32(PIPEDIR_IN),
220 .nentries = __cpu_to_le32(64),
221 .nbytes_max = __cpu_to_le32(2048),
222 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
223 .reserved = __cpu_to_le32(0),
226 /* CE3: host->target WMI */
228 .pipenum = __cpu_to_le32(3),
229 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
230 .nentries = __cpu_to_le32(32),
231 .nbytes_max = __cpu_to_le32(2048),
232 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
233 .reserved = __cpu_to_le32(0),
236 /* CE4: host->target HTT */
238 .pipenum = __cpu_to_le32(4),
239 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
240 .nentries = __cpu_to_le32(256),
241 .nbytes_max = __cpu_to_le32(256),
242 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
243 .reserved = __cpu_to_le32(0),
246 /* NB: 50% of src nentries, since tx has 2 frags */
250 .pipenum = __cpu_to_le32(5),
251 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
252 .nentries = __cpu_to_le32(32),
253 .nbytes_max = __cpu_to_le32(2048),
254 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
255 .reserved = __cpu_to_le32(0),
258 /* CE6: Reserved for target autonomous hif_memcpy */
260 .pipenum = __cpu_to_le32(6),
261 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
262 .nentries = __cpu_to_le32(32),
263 .nbytes_max = __cpu_to_le32(4096),
264 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
265 .reserved = __cpu_to_le32(0),
268 /* CE7 used only by Host */
270 .pipenum = __cpu_to_le32(7),
271 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
272 .nentries = __cpu_to_le32(0),
273 .nbytes_max = __cpu_to_le32(0),
274 .flags = __cpu_to_le32(0),
275 .reserved = __cpu_to_le32(0),
278 /* CE8 target->host packtlog */
280 .pipenum = __cpu_to_le32(8),
281 .pipedir = __cpu_to_le32(PIPEDIR_IN),
282 .nentries = __cpu_to_le32(64),
283 .nbytes_max = __cpu_to_le32(2048),
284 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
285 .reserved = __cpu_to_le32(0),
288 /* CE9 target autonomous qcache memcpy */
290 .pipenum = __cpu_to_le32(9),
291 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
292 .nentries = __cpu_to_le32(32),
293 .nbytes_max = __cpu_to_le32(2048),
294 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
295 .reserved = __cpu_to_le32(0),
298 /* It not necessary to send target wlan configuration for CE10 & CE11
299 * as these CEs are not actively used in target.
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
308 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
310 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
311 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
315 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
316 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
320 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
321 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
325 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
326 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
330 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
331 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
335 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
336 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
340 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
341 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
345 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
346 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
350 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
351 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
355 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
356 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
360 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
361 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
365 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
366 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
370 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
371 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
375 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
376 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
380 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
381 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
385 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
386 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
390 /* (Additions here) */
399 static bool ath10k_pci_is_awake(struct ath10k *ar)
401 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
402 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
405 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
408 static void __ath10k_pci_wake(struct ath10k *ar)
410 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
412 lockdep_assert_held(&ar_pci->ps_lock);
414 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
415 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
417 iowrite32(PCIE_SOC_WAKE_V_MASK,
418 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
419 PCIE_SOC_WAKE_ADDRESS);
422 static void __ath10k_pci_sleep(struct ath10k *ar)
424 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
426 lockdep_assert_held(&ar_pci->ps_lock);
428 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
429 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
431 iowrite32(PCIE_SOC_WAKE_RESET,
432 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
433 PCIE_SOC_WAKE_ADDRESS);
434 ar_pci->ps_awake = false;
437 static int ath10k_pci_wake_wait(struct ath10k *ar)
442 while (tot_delay < PCIE_WAKE_TIMEOUT) {
443 if (ath10k_pci_is_awake(ar))
447 tot_delay += curr_delay;
456 static int ath10k_pci_wake(struct ath10k *ar)
458 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
462 spin_lock_irqsave(&ar_pci->ps_lock, flags);
464 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
465 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
467 /* This function can be called very frequently. To avoid excessive
468 * CPU stalls for MMIO reads use a cache var to hold the device state.
470 if (!ar_pci->ps_awake) {
471 __ath10k_pci_wake(ar);
473 ret = ath10k_pci_wake_wait(ar);
475 ar_pci->ps_awake = true;
479 ar_pci->ps_wake_refcount++;
480 WARN_ON(ar_pci->ps_wake_refcount == 0);
483 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
488 static void ath10k_pci_sleep(struct ath10k *ar)
490 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
493 spin_lock_irqsave(&ar_pci->ps_lock, flags);
495 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
496 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
498 if (WARN_ON(ar_pci->ps_wake_refcount == 0))
501 ar_pci->ps_wake_refcount--;
503 mod_timer(&ar_pci->ps_timer, jiffies +
504 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
507 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
510 static void ath10k_pci_ps_timer(unsigned long ptr)
512 struct ath10k *ar = (void *)ptr;
513 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
516 spin_lock_irqsave(&ar_pci->ps_lock, flags);
518 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
519 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
521 if (ar_pci->ps_wake_refcount > 0)
524 __ath10k_pci_sleep(ar);
527 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
530 static void ath10k_pci_sleep_sync(struct ath10k *ar)
532 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
535 del_timer_sync(&ar_pci->ps_timer);
537 spin_lock_irqsave(&ar_pci->ps_lock, flags);
538 WARN_ON(ar_pci->ps_wake_refcount > 0);
539 __ath10k_pci_sleep(ar);
540 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
543 void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
545 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
548 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
549 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
550 offset, offset + sizeof(value), ar_pci->mem_len);
554 ret = ath10k_pci_wake(ar);
556 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
561 iowrite32(value, ar_pci->mem + offset);
562 ath10k_pci_sleep(ar);
565 u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
567 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
571 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
572 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
573 offset, offset + sizeof(val), ar_pci->mem_len);
577 ret = ath10k_pci_wake(ar);
579 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
584 val = ioread32(ar_pci->mem + offset);
585 ath10k_pci_sleep(ar);
590 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
592 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
595 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
597 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
600 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
602 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
605 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
607 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
610 static bool ath10k_pci_irq_pending(struct ath10k *ar)
614 /* Check if the shared legacy irq is for us */
615 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
616 PCIE_INTR_CAUSE_ADDRESS);
617 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
623 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
625 /* IMPORTANT: INTR_CLR register has to be set after
626 * INTR_ENABLE is set to 0, otherwise interrupt can not be
628 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
630 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
631 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
633 /* IMPORTANT: this extra read transaction is required to
634 * flush the posted write buffer. */
635 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
636 PCIE_INTR_ENABLE_ADDRESS);
639 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
641 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
642 PCIE_INTR_ENABLE_ADDRESS,
643 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
645 /* IMPORTANT: this extra read transaction is required to
646 * flush the posted write buffer. */
647 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
648 PCIE_INTR_ENABLE_ADDRESS);
651 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
653 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
655 if (ar_pci->num_msi_intrs > 1)
658 if (ar_pci->num_msi_intrs == 1)
664 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
666 struct ath10k *ar = pipe->hif_ce_state;
667 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
668 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
673 lockdep_assert_held(&ar_pci->ce_lock);
675 skb = dev_alloc_skb(pipe->buf_sz);
679 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
681 paddr = dma_map_single(ar->dev, skb->data,
682 skb->len + skb_tailroom(skb),
684 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
685 ath10k_warn(ar, "failed to dma map pci rx buf\n");
686 dev_kfree_skb_any(skb);
690 ATH10K_SKB_RXCB(skb)->paddr = paddr;
692 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
694 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
695 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
697 dev_kfree_skb_any(skb);
704 static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
706 struct ath10k *ar = pipe->hif_ce_state;
707 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
708 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
711 lockdep_assert_held(&ar_pci->ce_lock);
713 if (pipe->buf_sz == 0)
716 if (!ce_pipe->dest_ring)
719 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
721 ret = __ath10k_pci_rx_post_buf(pipe);
723 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
724 mod_timer(&ar_pci->rx_post_retry, jiffies +
725 ATH10K_PCI_RX_POST_RETRY_MS);
731 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
733 struct ath10k *ar = pipe->hif_ce_state;
734 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
736 spin_lock_bh(&ar_pci->ce_lock);
737 __ath10k_pci_rx_post_pipe(pipe);
738 spin_unlock_bh(&ar_pci->ce_lock);
741 static void ath10k_pci_rx_post(struct ath10k *ar)
743 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
746 spin_lock_bh(&ar_pci->ce_lock);
747 for (i = 0; i < CE_COUNT; i++)
748 __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
749 spin_unlock_bh(&ar_pci->ce_lock);
/* rx_post_retry timer callback: retry refilling the rx rings. */
static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}
759 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
763 switch (ar->hw_rev) {
764 case ATH10K_HW_QCA988X:
765 case ATH10K_HW_QCA6174:
766 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
770 case ATH10K_HW_QCA99X0:
771 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
775 val |= 0x100000 | (addr & 0xfffff);
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
784 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
787 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
790 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
793 struct ath10k_ce_pipe *ce_diag;
794 /* Host buffer address in CE space */
796 dma_addr_t ce_data_base = 0;
797 void *data_buf = NULL;
800 spin_lock_bh(&ar_pci->ce_lock);
802 ce_diag = ar_pci->ce_diag;
805 * Allocate a temporary bounce buffer to hold caller's data
806 * to be DMA'ed from Target. This guarantees
807 * 1) 4-byte alignment
808 * 2) Buffer in DMA-able space
810 orig_nbytes = nbytes;
811 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
820 memset(data_buf, 0, orig_nbytes);
822 remaining_bytes = orig_nbytes;
823 ce_data = ce_data_base;
824 while (remaining_bytes) {
825 nbytes = min_t(unsigned int, remaining_bytes,
826 DIAG_TRANSFER_LIMIT);
828 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
832 /* Request CE to send from Target(!) address to Host buffer */
834 * The address supplied by the caller is in the
835 * Target CPU virtual address space.
837 * In order to use this address with the diagnostic CE,
838 * convert it from Target CPU virtual address space
839 * to CE address space
841 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
843 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
849 while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
853 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
859 if (nbytes != completed_nbytes) {
864 if (buf != (u32)address) {
870 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
875 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
881 if (nbytes != completed_nbytes) {
886 if (buf != ce_data) {
891 remaining_bytes -= nbytes;
898 memcpy(data, data_buf, orig_nbytes);
900 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
904 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
907 spin_unlock_bh(&ar_pci->ce_lock);
912 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
917 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
918 *value = __le32_to_cpu(val);
923 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
929 host_addr = host_interest_item_address(src);
931 ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
933 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
938 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
940 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
/* Convenience wrapper: translate a host-interest member name into its
 * firmware offset via HI_ITEM() before reading it.
 */
#define ath10k_pci_diag_read_hi(ar, dest, src, len)	\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
951 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
952 const void *data, int nbytes)
954 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
957 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
960 struct ath10k_ce_pipe *ce_diag;
961 void *data_buf = NULL;
962 u32 ce_data; /* Host buffer address in CE space */
963 dma_addr_t ce_data_base = 0;
966 spin_lock_bh(&ar_pci->ce_lock);
968 ce_diag = ar_pci->ce_diag;
971 * Allocate a temporary bounce buffer to hold caller's data
972 * to be DMA'ed to Target. This guarantees
973 * 1) 4-byte alignment
974 * 2) Buffer in DMA-able space
976 orig_nbytes = nbytes;
977 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
986 /* Copy caller's data to allocated DMA buf */
987 memcpy(data_buf, data, orig_nbytes);
990 * The address supplied by the caller is in the
991 * Target CPU virtual address space.
993 * In order to use this address with the diagnostic CE,
995 * Target CPU virtual address space
999 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1001 remaining_bytes = orig_nbytes;
1002 ce_data = ce_data_base;
1003 while (remaining_bytes) {
1004 /* FIXME: check cast */
1005 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1007 /* Set up to receive directly into Target(!) address */
1008 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
1013 * Request CE to send caller-supplied data that
1014 * was copied to bounce buffer to Target(!) address.
1016 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
1022 while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
1027 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1033 if (nbytes != completed_nbytes) {
1038 if (buf != ce_data) {
1044 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
1046 &id, &flags) != 0) {
1049 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1055 if (nbytes != completed_nbytes) {
1060 if (buf != address) {
1065 remaining_bytes -= nbytes;
1072 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
1077 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1080 spin_unlock_bh(&ar_pci->ce_lock);
1085 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1087 __le32 val = __cpu_to_le32(value);
1089 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1092 /* Called by lower (CE) layer when a send to Target completes. */
1093 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
1095 struct ath10k *ar = ce_state->ar;
1096 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1097 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1098 struct sk_buff_head list;
1099 struct sk_buff *skb;
1101 unsigned int nbytes;
1102 unsigned int transfer_id;
1104 __skb_queue_head_init(&list);
1105 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
1106 &nbytes, &transfer_id) == 0) {
1107 /* no need to call tx completion for NULL pointers */
1111 __skb_queue_tail(&list, skb);
1114 while ((skb = __skb_dequeue(&list)))
1115 cb->tx_completion(ar, skb);
1118 /* Called by lower (CE) layer when data is received from the Target. */
1119 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
1121 struct ath10k *ar = ce_state->ar;
1122 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1123 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1124 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1125 struct sk_buff *skb;
1126 struct sk_buff_head list;
1127 void *transfer_context;
1129 unsigned int nbytes, max_nbytes;
1130 unsigned int transfer_id;
1133 __skb_queue_head_init(&list);
1134 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1135 &ce_data, &nbytes, &transfer_id,
1137 skb = transfer_context;
1138 max_nbytes = skb->len + skb_tailroom(skb);
1139 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1140 max_nbytes, DMA_FROM_DEVICE);
1142 if (unlikely(max_nbytes < nbytes)) {
1143 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1144 nbytes, max_nbytes);
1145 dev_kfree_skb_any(skb);
1149 skb_put(skb, nbytes);
1150 __skb_queue_tail(&list, skb);
1153 while ((skb = __skb_dequeue(&list))) {
1154 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1155 ce_state->id, skb->len);
1156 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1157 skb->data, skb->len);
1159 cb->rx_completion(ar, skb);
1162 ath10k_pci_rx_post_pipe(pipe_info);
1165 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1166 struct ath10k_hif_sg_item *items, int n_items)
1168 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1169 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1170 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1171 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1172 unsigned int nentries_mask;
1173 unsigned int sw_index;
1174 unsigned int write_index;
1177 spin_lock_bh(&ar_pci->ce_lock);
1179 nentries_mask = src_ring->nentries_mask;
1180 sw_index = src_ring->sw_index;
1181 write_index = src_ring->write_index;
1183 if (unlikely(CE_RING_DELTA(nentries_mask,
1184 write_index, sw_index - 1) < n_items)) {
1189 for (i = 0; i < n_items - 1; i++) {
1190 ath10k_dbg(ar, ATH10K_DBG_PCI,
1191 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1192 i, items[i].paddr, items[i].len, n_items);
1193 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1194 items[i].vaddr, items[i].len);
1196 err = ath10k_ce_send_nolock(ce_pipe,
1197 items[i].transfer_context,
1200 items[i].transfer_id,
1201 CE_SEND_FLAG_GATHER);
1206 /* `i` is equal to `n_items -1` after for() */
1208 ath10k_dbg(ar, ATH10K_DBG_PCI,
1209 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1210 i, items[i].paddr, items[i].len, n_items);
1211 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1212 items[i].vaddr, items[i].len);
1214 err = ath10k_ce_send_nolock(ce_pipe,
1215 items[i].transfer_context,
1218 items[i].transfer_id,
1223 spin_unlock_bh(&ar_pci->ce_lock);
1228 __ath10k_ce_send_revert(ce_pipe);
1230 spin_unlock_bh(&ar_pci->ce_lock);
1234 static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1237 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1240 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1242 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1244 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1246 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1249 static void ath10k_pci_dump_registers(struct ath10k *ar,
1250 struct ath10k_fw_crash_data *crash_data)
1252 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1255 lockdep_assert_held(&ar->data_lock);
1257 ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0],
1259 REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1261 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1265 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1267 ath10k_err(ar, "firmware register dump:\n");
1268 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1269 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1271 __le32_to_cpu(reg_dump_values[i]),
1272 __le32_to_cpu(reg_dump_values[i + 1]),
1273 __le32_to_cpu(reg_dump_values[i + 2]),
1274 __le32_to_cpu(reg_dump_values[i + 3]));
1279 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1280 crash_data->registers[i] = reg_dump_values[i];
1283 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1285 struct ath10k_fw_crash_data *crash_data;
1288 spin_lock_bh(&ar->data_lock);
1290 ar->stats.fw_crash_counter++;
1292 crash_data = ath10k_debug_get_new_fw_crash_data(ar);
1295 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
1297 scnprintf(uuid, sizeof(uuid), "n/a");
1299 ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
1300 ath10k_print_driver_info(ar);
1301 ath10k_pci_dump_registers(ar, crash_data);
1303 spin_unlock_bh(&ar->data_lock);
1305 queue_work(ar->workqueue, &ar->restart_work);
1308 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1311 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1316 * Decide whether to actually poll for completions, or just
1317 * wait for a later chance.
1318 * If there seem to be plenty of resources left, then just wait
1319 * since checking involves reading a CE register, which is a
1320 * relatively expensive operation.
1322 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1325 * If at least 50% of the total resources are still available,
1326 * don't bother checking again yet.
1328 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1331 ath10k_ce_per_engine_service(ar, pipe);
1334 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
1335 struct ath10k_hif_cb *callbacks)
1337 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1339 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
1341 memcpy(&ar_pci->msg_callbacks_current, callbacks,
1342 sizeof(ar_pci->msg_callbacks_current));
1345 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
1347 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1350 tasklet_kill(&ar_pci->intr_tq);
1351 tasklet_kill(&ar_pci->msi_fw_err);
1353 for (i = 0; i < CE_COUNT; i++)
1354 tasklet_kill(&ar_pci->pipe_info[i].intr);
1356 del_timer_sync(&ar_pci->rx_post_retry);
1359 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1360 u16 service_id, u8 *ul_pipe,
1361 u8 *dl_pipe, int *ul_is_polled,
1364 const struct service_to_pipe *entry;
1365 bool ul_set = false, dl_set = false;
1368 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1370 /* polling for received messages not supported */
1373 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1374 entry = &target_service_to_ce_map_wlan[i];
1376 if (__le32_to_cpu(entry->service_id) != service_id)
1379 switch (__le32_to_cpu(entry->pipedir)) {
1384 *dl_pipe = __le32_to_cpu(entry->pipenum);
1389 *ul_pipe = __le32_to_cpu(entry->pipenum);
1395 *dl_pipe = __le32_to_cpu(entry->pipenum);
1396 *ul_pipe = __le32_to_cpu(entry->pipenum);
1403 if (WARN_ON(!ul_set || !dl_set))
1407 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1412 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1413 u8 *ul_pipe, u8 *dl_pipe)
1415 int ul_is_polled, dl_is_polled;
1417 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1419 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1420 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1427 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1431 switch (ar->hw_rev) {
1432 case ATH10K_HW_QCA988X:
1433 case ATH10K_HW_QCA6174:
1434 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1436 val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1437 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1438 CORE_CTRL_ADDRESS, val);
1440 case ATH10K_HW_QCA99X0:
1441 /* TODO: Find appropriate register configuration for QCA99X0
1448 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1452 switch (ar->hw_rev) {
1453 case ATH10K_HW_QCA988X:
1454 case ATH10K_HW_QCA6174:
1455 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1457 val |= CORE_CTRL_PCIE_REG_31_MASK;
1458 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1459 CORE_CTRL_ADDRESS, val);
1461 case ATH10K_HW_QCA99X0:
1462 /* TODO: Find appropriate register configuration for QCA99X0
1463 * to unmask irq/MSI.
/* Quiesce all device interrupt sources: CE, legacy INTx and fw MSI. */
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}
1476 static void ath10k_pci_irq_sync(struct ath10k *ar)
1478 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1481 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1482 synchronize_irq(ar_pci->pdev->irq + i);
/* Re-arm all device interrupt sources (inverse of ath10k_pci_irq_disable). */
static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}
1492 static int ath10k_pci_hif_start(struct ath10k *ar)
1494 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1495 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1497 ath10k_pci_irq_enable(ar);
1498 ath10k_pci_rx_post(ar);
1500 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1506 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1509 struct ath10k_ce_pipe *ce_pipe;
1510 struct ath10k_ce_ring *ce_ring;
1511 struct sk_buff *skb;
1514 ar = pci_pipe->hif_ce_state;
1515 ce_pipe = pci_pipe->ce_hdl;
1516 ce_ring = ce_pipe->dest_ring;
1521 if (!pci_pipe->buf_sz)
1524 for (i = 0; i < ce_ring->nentries; i++) {
1525 skb = ce_ring->per_transfer_context[i];
1529 ce_ring->per_transfer_context[i] = NULL;
1531 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1532 skb->len + skb_tailroom(skb),
1534 dev_kfree_skb_any(skb);
1538 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1541 struct ath10k_pci *ar_pci;
1542 struct ath10k_ce_pipe *ce_pipe;
1543 struct ath10k_ce_ring *ce_ring;
1544 struct ce_desc *ce_desc;
1545 struct sk_buff *skb;
1548 ar = pci_pipe->hif_ce_state;
1549 ar_pci = ath10k_pci_priv(ar);
1550 ce_pipe = pci_pipe->ce_hdl;
1551 ce_ring = ce_pipe->src_ring;
1556 if (!pci_pipe->buf_sz)
1559 ce_desc = ce_ring->shadow_base;
1560 if (WARN_ON(!ce_desc))
1563 for (i = 0; i < ce_ring->nentries; i++) {
1564 skb = ce_ring->per_transfer_context[i];
1568 ce_ring->per_transfer_context[i] = NULL;
1570 ar_pci->msg_callbacks_current.tx_completion(ar, skb);
1575 * Cleanup residual buffers for device shutdown:
1576 * buffers that were enqueued for receive
1577 * buffers that were to be sent
1578 * Note: Buffers that had completed but which were
1579 * not yet processed are on a completion queue. They
1580 * are handled when the completion thread shuts down.
1582 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1584 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1587 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1588 struct ath10k_pci_pipe *pipe_info;
1590 pipe_info = &ar_pci->pipe_info[pipe_num];
1591 ath10k_pci_rx_pipe_cleanup(pipe_info);
1592 ath10k_pci_tx_pipe_cleanup(pipe_info);
1596 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1600 for (i = 0; i < CE_COUNT; i++)
1601 ath10k_ce_deinit_pipe(ar, i);
/* Stop deferred work and reclaim all outstanding rx/tx buffers. */
static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}
1610 static void ath10k_pci_hif_stop(struct ath10k *ar)
1612 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1613 unsigned long flags;
1615 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
1617 /* Most likely the device has HTT Rx ring configured. The only way to
1618 * prevent the device from accessing (and possible corrupting) host
1619 * memory is to reset the chip now.
1621 * There's also no known way of masking MSI interrupts on the device.
1622 * For ranged MSI the CE-related interrupts can be masked. However
1623 * regardless how many MSI interrupts are assigned the first one
1624 * is always used for firmware indications (crashes) and cannot be
1625 * masked. To prevent the device from asserting the interrupt reset it
1626 * before proceeding with cleanup.
1628 ath10k_pci_safe_chip_reset(ar);
1630 ath10k_pci_irq_disable(ar);
1631 ath10k_pci_irq_sync(ar);
1632 ath10k_pci_flush(ar);
1634 spin_lock_irqsave(&ar_pci->ps_lock, flags);
1635 WARN_ON(ar_pci->ps_wake_refcount > 0);
1636 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
/* Synchronous BMI (Boot Messaging Interface) request/response exchange over
 * the dedicated BMI copy-engine pipes. The request is copied into a DMA-able
 * bounce buffer; an optional response buffer is allocated, mapped and posted
 * to the rx CE before the request is sent.
 * NOTE(review): this block is an incomplete extraction — error-handling
 * labels and several statements between the visible lines are missing.
 */
1639 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1640 void *req, u32 req_len,
1641 void *resp, u32 *resp_len)
1643 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1644 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1645 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1646 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1647 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1648 dma_addr_t req_paddr = 0;
1649 dma_addr_t resp_paddr = 0;
1650 struct bmi_xfer xfer = {};
1651 void *treq, *tresp = NULL;
/* reject inconsistent resp/resp_len combinations up front */
1656 if (resp && !resp_len)
1659 if (resp && resp_len && *resp_len == 0)
/* bounce the caller's request into a freshly allocated DMA-safe buffer */
1662 treq = kmemdup(req, req_len, GFP_KERNEL);
1666 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1667 ret = dma_mapping_error(ar->dev, req_paddr);
/* optionally set up and post a response buffer on the rx pipe */
1671 if (resp && resp_len) {
1672 tresp = kzalloc(*resp_len, GFP_KERNEL);
1678 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1680 ret = dma_mapping_error(ar->dev, resp_paddr);
1684 xfer.wait_for_resp = true;
1687 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
/* send the request and poll both pipes until done or timeout */
1690 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1694 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
/* on timeout, reclaim the request descriptor from the tx CE */
1697 unsigned int unused_nbytes;
1698 unsigned int unused_id;
1700 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1701 &unused_nbytes, &unused_id);
1703 /* non-zero means we did not time out */
/* always revoke and unmap the posted response buffer */
1711 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1712 dma_unmap_single(ar->dev, resp_paddr,
1713 *resp_len, DMA_FROM_DEVICE);
1716 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
/* copy back the (possibly truncated) response on success */
1718 if (ret == 0 && resp_len) {
1719 *resp_len = min(*resp_len, xfer.resp_len);
1720 memcpy(resp, tresp, xfer.resp_len);
1729 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1731 struct bmi_xfer *xfer;
1733 unsigned int nbytes;
1734 unsigned int transfer_id;
1736 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1737 &nbytes, &transfer_id))
1740 xfer->tx_done = true;
1743 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1745 struct ath10k *ar = ce_state->ar;
1746 struct bmi_xfer *xfer;
1748 unsigned int nbytes;
1749 unsigned int transfer_id;
1752 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1753 &nbytes, &transfer_id, &flags))
1756 if (WARN_ON_ONCE(!xfer))
1759 if (!xfer->wait_for_resp) {
1760 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1764 xfer->resp_len = nbytes;
1765 xfer->rx_done = true;
1768 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1769 struct ath10k_ce_pipe *rx_pipe,
1770 struct bmi_xfer *xfer)
1772 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1774 while (time_before_eq(jiffies, timeout)) {
1775 ath10k_pci_bmi_send_done(tx_pipe);
1776 ath10k_pci_bmi_recv_data(rx_pipe);
1778 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1788 * Send an interrupt to the device to wake up the Target CPU
1789 * so it has an opportunity to notice any changed state.
1791 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1795 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1796 val = ath10k_pci_read32(ar, addr);
1797 val |= CORE_CTRL_CPU_INTR_MASK;
1798 ath10k_pci_write32(ar, addr, val);
1803 static int ath10k_pci_get_num_banks(struct ath10k *ar)
1805 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1807 switch (ar_pci->pdev->device) {
1808 case QCA988X_2_0_DEVICE_ID:
1809 case QCA99X0_2_0_DEVICE_ID:
1811 case QCA6174_2_1_DEVICE_ID:
1812 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1813 case QCA6174_HW_1_0_CHIP_ID_REV:
1814 case QCA6174_HW_1_1_CHIP_ID_REV:
1815 case QCA6174_HW_2_1_CHIP_ID_REV:
1816 case QCA6174_HW_2_2_CHIP_ID_REV:
1818 case QCA6174_HW_1_3_CHIP_ID_REV:
1820 case QCA6174_HW_3_0_CHIP_ID_REV:
1821 case QCA6174_HW_3_1_CHIP_ID_REV:
1822 case QCA6174_HW_3_2_CHIP_ID_REV:
1828 ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1832 static int ath10k_pci_init_config(struct ath10k *ar)
1834 u32 interconnect_targ_addr;
1835 u32 pcie_state_targ_addr = 0;
1836 u32 pipe_cfg_targ_addr = 0;
1837 u32 svc_to_pipe_map = 0;
1838 u32 pcie_config_flags = 0;
1840 u32 ealloc_targ_addr;
1842 u32 flag2_targ_addr;
1845 /* Download to Target the CE Config and the service-to-CE map */
1846 interconnect_targ_addr =
1847 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1849 /* Supply Target-side CE configuration */
1850 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1851 &pcie_state_targ_addr);
1853 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
1857 if (pcie_state_targ_addr == 0) {
1859 ath10k_err(ar, "Invalid pcie state addr\n");
1863 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1864 offsetof(struct pcie_state,
1866 &pipe_cfg_targ_addr);
1868 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
1872 if (pipe_cfg_targ_addr == 0) {
1874 ath10k_err(ar, "Invalid pipe cfg addr\n");
1878 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1879 target_ce_config_wlan,
1880 sizeof(struct ce_pipe_config) *
1881 NUM_TARGET_CE_CONFIG_WLAN);
1884 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
1888 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1889 offsetof(struct pcie_state,
1893 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
1897 if (svc_to_pipe_map == 0) {
1899 ath10k_err(ar, "Invalid svc_to_pipe map\n");
1903 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1904 target_service_to_ce_map_wlan,
1905 sizeof(target_service_to_ce_map_wlan));
1907 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
1911 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1912 offsetof(struct pcie_state,
1914 &pcie_config_flags);
1916 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
1920 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1922 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
1923 offsetof(struct pcie_state,
1927 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
1931 /* configure early allocation */
1932 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1934 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
1936 ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
1940 /* first bank is switched to IRAM */
1941 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1942 HI_EARLY_ALLOC_MAGIC_MASK);
1943 ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
1944 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1945 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1947 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
1949 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
1953 /* Tell Target to proceed with initialization */
1954 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1956 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
1958 ath10k_err(ar, "Failed to get option val: %d\n", ret);
1962 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1964 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
1966 ath10k_err(ar, "Failed to set option val: %d\n", ret);
1973 static int ath10k_pci_alloc_pipes(struct ath10k *ar)
1975 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1976 struct ath10k_pci_pipe *pipe;
1979 for (i = 0; i < CE_COUNT; i++) {
1980 pipe = &ar_pci->pipe_info[i];
1981 pipe->ce_hdl = &ar_pci->ce_states[i];
1983 pipe->hif_ce_state = ar;
1985 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
1986 ath10k_pci_ce_send_done,
1987 ath10k_pci_ce_recv_data);
1989 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1994 /* Last CE is Diagnostic Window */
1995 if (i == CE_DIAG_PIPE) {
1996 ar_pci->ce_diag = pipe->ce_hdl;
2000 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2006 static void ath10k_pci_free_pipes(struct ath10k *ar)
2010 for (i = 0; i < CE_COUNT; i++)
2011 ath10k_ce_free_pipe(ar, i);
2014 static int ath10k_pci_init_pipes(struct ath10k *ar)
2018 for (i = 0; i < CE_COUNT; i++) {
2019 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2021 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2030 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2032 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2033 FW_IND_EVENT_PENDING;
2036 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2040 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2041 val &= ~FW_IND_EVENT_PENDING;
2042 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2045 /* this function effectively clears target memory controller assert line */
2046 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2050 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2051 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2052 val | SOC_RESET_CONTROL_SI0_RST_MASK);
2053 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2057 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2058 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2059 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2060 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2065 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2069 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2071 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2072 SOC_RESET_CONTROL_ADDRESS);
2073 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2074 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2077 static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2081 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2082 SOC_RESET_CONTROL_ADDRESS);
2084 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2085 val | SOC_RESET_CONTROL_CE_RST_MASK);
2087 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2088 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2091 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2095 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2096 SOC_LF_TIMER_CONTROL0_ADDRESS);
2097 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2098 SOC_LF_TIMER_CONTROL0_ADDRESS,
2099 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2102 static int ath10k_pci_warm_reset(struct ath10k *ar)
2106 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2108 spin_lock_bh(&ar->data_lock);
2109 ar->stats.fw_warm_reset_counter++;
2110 spin_unlock_bh(&ar->data_lock);
2112 ath10k_pci_irq_disable(ar);
2114 /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2115 * were to access copy engine while host performs copy engine reset
2116 * then it is possible for the device to confuse pci-e controller to
2117 * the point of bringing host system to a complete stop (i.e. hang).
2119 ath10k_pci_warm_reset_si0(ar);
2120 ath10k_pci_warm_reset_cpu(ar);
2121 ath10k_pci_init_pipes(ar);
2122 ath10k_pci_wait_for_target_init(ar);
2124 ath10k_pci_warm_reset_clear_lf(ar);
2125 ath10k_pci_warm_reset_ce(ar);
2126 ath10k_pci_warm_reset_cpu(ar);
2127 ath10k_pci_init_pipes(ar);
2129 ret = ath10k_pci_wait_for_target_init(ar);
2131 ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2135 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2140 static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2142 if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
2143 return ath10k_pci_warm_reset(ar);
2144 } else if (QCA_REV_99X0(ar)) {
2145 ath10k_pci_irq_disable(ar);
2146 return ath10k_pci_qca99x0_chip_reset(ar);
2152 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2157 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2159 /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
2160 * It is thus preferred to use warm reset which is safer but may not be
2161 * able to recover the device from all possible fail scenarios.
2163 * Warm reset doesn't always work on first try so attempt it a few
2164 * times before giving up.
2166 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2167 ret = ath10k_pci_warm_reset(ar);
2169 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2170 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2175 /* FIXME: Sometimes copy engine doesn't recover after warm
2176 * reset. In most cases this needs cold reset. In some of these
2177 * cases the device is in such a state that a cold reset may
2180 * Reading any host interest register via copy engine is
2181 * sufficient to verify if device is capable of booting
2184 ret = ath10k_pci_init_pipes(ar);
2186 ath10k_warn(ar, "failed to init copy engine: %d\n",
2191 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2194 ath10k_warn(ar, "failed to poke copy engine: %d\n",
2199 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2203 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2204 ath10k_warn(ar, "refusing cold reset as requested\n");
2208 ret = ath10k_pci_cold_reset(ar);
2210 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2214 ret = ath10k_pci_wait_for_target_init(ar);
2216 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2221 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2226 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2230 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2232 /* FIXME: QCA6174 requires cold + warm reset to work. */
2234 ret = ath10k_pci_cold_reset(ar);
2236 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2240 ret = ath10k_pci_wait_for_target_init(ar);
2242 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2247 ret = ath10k_pci_warm_reset(ar);
2249 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2253 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2258 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2262 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2264 ret = ath10k_pci_cold_reset(ar);
2266 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2270 ret = ath10k_pci_wait_for_target_init(ar);
2272 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2277 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2282 static int ath10k_pci_chip_reset(struct ath10k *ar)
2284 if (QCA_REV_988X(ar))
2285 return ath10k_pci_qca988x_chip_reset(ar);
2286 else if (QCA_REV_6174(ar))
2287 return ath10k_pci_qca6174_chip_reset(ar);
2288 else if (QCA_REV_99X0(ar))
2289 return ath10k_pci_qca99x0_chip_reset(ar);
2294 static int ath10k_pci_hif_power_up(struct ath10k *ar)
2296 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2299 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2301 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2303 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2304 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2307 * Bring the target up cleanly.
2309 * The target may be in an undefined state with an AUX-powered Target
2310 * and a Host in WoW mode. If the Host crashes, loses power, or is
2311 * restarted (without unloading the driver) then the Target is left
2312 * (aux) powered and running. On a subsequent driver load, the Target
2313 * is in an unexpected state. We try to catch that here in order to
2314 * reset the Target and retry the probe.
2316 ret = ath10k_pci_chip_reset(ar);
2318 if (ath10k_pci_has_fw_crashed(ar)) {
2319 ath10k_warn(ar, "firmware crashed during chip reset\n");
2320 ath10k_pci_fw_crashed_clear(ar);
2321 ath10k_pci_fw_crashed_dump(ar);
2324 ath10k_err(ar, "failed to reset chip: %d\n", ret);
2328 ret = ath10k_pci_init_pipes(ar);
2330 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2334 ret = ath10k_pci_init_config(ar);
2336 ath10k_err(ar, "failed to setup init config: %d\n", ret);
2340 ret = ath10k_pci_wake_target_cpu(ar);
2342 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2349 ath10k_pci_ce_deinit(ar);
2355 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2357 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2359 /* Currently hif_power_up performs effectively a reset and hif_stop
2360 * resets the chip as well so there's no point in resetting here.
/* Ensure the device is actually asleep before the system suspends. */
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}
2378 static int ath10k_pci_hif_resume(struct ath10k *ar)
2380 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2381 struct pci_dev *pdev = ar_pci->pdev;
2384 /* Suspend/Resume resets the PCI configuration space, so we have to
2385 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2386 * from interfering with C3 CPU state. pci_restore_state won't help
2387 * here since it only restores the first 64 bytes pci config header.
2389 pci_read_config_dword(pdev, 0x40, &val);
2390 if ((val & 0x0000ff00) != 0)
2391 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* HIF operation table wiring the generic ath10k HIF layer to the PCI
 * transport implementations above.
 * NOTE(review): the suspend/resume entries are presumably guarded by
 * CONFIG_PM in the full file — the guard line is not visible here; confirm.
 */
2397 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2398 .tx_sg = ath10k_pci_hif_tx_sg,
2399 .diag_read = ath10k_pci_hif_diag_read,
2400 .diag_write = ath10k_pci_diag_write_mem,
2401 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2402 .start = ath10k_pci_hif_start,
2403 .stop = ath10k_pci_hif_stop,
2404 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2405 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2406 .send_complete_check = ath10k_pci_hif_send_complete_check,
2407 .set_callbacks = ath10k_pci_hif_set_callbacks,
2408 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
2409 .power_up = ath10k_pci_hif_power_up,
2410 .power_down = ath10k_pci_hif_power_down,
2411 .read32 = ath10k_pci_read32,
2412 .write32 = ath10k_pci_write32,
2414 .suspend = ath10k_pci_hif_suspend,
2415 .resume = ath10k_pci_hif_resume,
2419 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2421 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2422 struct ath10k_pci *ar_pci = pipe->ar_pci;
2424 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
/* Firmware-error tasklet: verify the crash indication, then quiesce irqs
 * and dump crash state.
 */
static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	if (!ath10k_pci_has_fw_crashed(ar)) {
		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
		return;
	}

	ath10k_pci_irq_disable(ar);
	ath10k_pci_fw_crashed_clear(ar);
	ath10k_pci_fw_crashed_dump(ar);
}
2442 * Handler for a per-engine interrupt on a PARTICULAR CE.
2443 * This is used in cases where each CE has a private MSI interrupt.
2445 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2447 struct ath10k *ar = arg;
2448 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2449 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2451 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2452 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2458 * NOTE: We are able to derive ce_id from irq because we
2459 * use a one-to-one mapping for CE's 0..5.
2460 * CE's 6 & 7 do not use interrupts at all.
2462 * This mapping must be kept in sync with the mapping
2465 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2469 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2471 struct ath10k *ar = arg;
2472 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2474 tasklet_schedule(&ar_pci->msi_fw_err);
2479 * Top-level interrupt handler for all PCI interrupts from a Target.
2480 * When a block of MSI interrupts is allocated, this top-level handler
2481 * is not used; instead, we directly call the correct sub-handler.
2483 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2485 struct ath10k *ar = arg;
2486 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2488 if (ar_pci->num_msi_intrs == 0) {
2489 if (!ath10k_pci_irq_pending(ar))
2492 ath10k_pci_disable_and_clear_legacy_irq(ar);
2495 tasklet_schedule(&ar_pci->intr_tq);
2500 static void ath10k_pci_tasklet(unsigned long data)
2502 struct ath10k *ar = (struct ath10k *)data;
2503 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2505 if (ath10k_pci_has_fw_crashed(ar)) {
2506 ath10k_pci_irq_disable(ar);
2507 ath10k_pci_fw_crashed_clear(ar);
2508 ath10k_pci_fw_crashed_dump(ar);
2512 ath10k_ce_per_engine_service_any(ar);
2514 /* Re-enable legacy irq that was disabled in the irq handler */
2515 if (ar_pci->num_msi_intrs == 0)
2516 ath10k_pci_enable_legacy_irq(ar);
2519 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2521 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2524 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2525 ath10k_pci_msi_fw_handler,
2526 IRQF_SHARED, "ath10k_pci", ar);
2528 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
2529 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2533 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2534 ret = request_irq(ar_pci->pdev->irq + i,
2535 ath10k_pci_per_engine_handler,
2536 IRQF_SHARED, "ath10k_pci", ar);
2538 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
2539 ar_pci->pdev->irq + i, ret);
2541 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2542 free_irq(ar_pci->pdev->irq + i, ar);
2544 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2552 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2554 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2557 ret = request_irq(ar_pci->pdev->irq,
2558 ath10k_pci_interrupt_handler,
2559 IRQF_SHARED, "ath10k_pci", ar);
2561 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2562 ar_pci->pdev->irq, ret);
2569 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2571 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2574 ret = request_irq(ar_pci->pdev->irq,
2575 ath10k_pci_interrupt_handler,
2576 IRQF_SHARED, "ath10k_pci", ar);
2578 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2579 ar_pci->pdev->irq, ret);
2586 static int ath10k_pci_request_irq(struct ath10k *ar)
2588 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2590 switch (ar_pci->num_msi_intrs) {
2592 return ath10k_pci_request_irq_legacy(ar);
2594 return ath10k_pci_request_irq_msi(ar);
2595 case MSI_NUM_REQUEST:
2596 return ath10k_pci_request_irq_msix(ar);
2599 ath10k_warn(ar, "unknown irq configuration upon request\n");
2603 static void ath10k_pci_free_irq(struct ath10k *ar)
2605 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2608 /* There's at least one interrupt irregardless whether its legacy INTR
2609 * or MSI or MSI-X */
2610 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2611 free_irq(ar_pci->pdev->irq + i, ar);
2614 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2616 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2619 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2620 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2623 for (i = 0; i < CE_COUNT; i++) {
2624 ar_pci->pipe_info[i].ar_pci = ar_pci;
2625 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2626 (unsigned long)&ar_pci->pipe_info[i]);
/* Negotiate the interrupt mode: try ranged MSI, then single MSI, finally
 * fall back to legacy INTx (num_msi_intrs == 0). The chosen count is
 * recorded in ar_pci->num_msi_intrs for request/free/deinit to match.
 * NOTE(review): incomplete extraction — the early-return statements after
 * each pci_enable_msi* attempt are not visible here.
 */
2632 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2635 ath10k_pci_init_irq_tasklets(ar);
2637 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2638 ath10k_info(ar, "limiting irq mode to: %d\n",
2639 ath10k_pci_irq_mode);
/* first preference: the full ranged-MSI block */
2642 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2643 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2644 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2645 ar_pci->num_msi_intrs);
/* second preference: a single MSI */
2653 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2654 ar_pci->num_msi_intrs = 1;
2655 ret = pci_enable_msi(ar_pci->pdev);
2664 * A potential race occurs here: The CORE_BASE write
2665 * depends on target correctly decoding AXI address but
2666 * host won't know when target writes BAR to CORE_CTRL.
2667 * This write might get lost if target has NOT written BAR.
2668 * For now, fix the race by repeating the write in below
2669 * synchronization checking. */
2670 ar_pci->num_msi_intrs = 0;
2672 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2673 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2678 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2680 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
/* Undo ath10k_pci_init_irq(): mask legacy interrupts or disable MSI,
 * depending on the mode recorded in num_msi_intrs.
 * NOTE(review): incomplete extraction — the `return 0;` statements and the
 * `case 1:` fall-through between the visible lines are not shown.
 */
2686 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2688 switch (ar_pci->num_msi_intrs) {
2690 ath10k_pci_deinit_irq_legacy(ar);
2694 case MSI_NUM_REQUEST:
2695 pci_disable_msi(ar_pci->pdev);
/* default: disable MSI anyway, then report the inconsistent state */
2698 pci_disable_msi(ar_pci->pdev);
2701 ath10k_warn(ar, "unknown irq configuration upon deinit\n");
2705 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2707 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2708 unsigned long timeout;
2711 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
2713 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2716 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2718 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2721 /* target should never return this */
2722 if (val == 0xffffffff)
2725 /* the device has crashed so don't bother trying anymore */
2726 if (val & FW_IND_EVENT_PENDING)
2729 if (val & FW_IND_INITIALIZED)
2732 if (ar_pci->num_msi_intrs == 0)
2733 /* Fix potential race by repeating CORE_BASE writes */
2734 ath10k_pci_enable_legacy_irq(ar);
2737 } while (time_before(jiffies, timeout));
2739 ath10k_pci_disable_and_clear_legacy_irq(ar);
2740 ath10k_pci_irq_msi_fw_mask(ar);
2742 if (val == 0xffffffff) {
2743 ath10k_err(ar, "failed to read device register, device is gone\n");
2747 if (val & FW_IND_EVENT_PENDING) {
2748 ath10k_warn(ar, "device has crashed during init\n");
2752 if (!(val & FW_IND_INITIALIZED)) {
2753 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2758 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2762 static int ath10k_pci_cold_reset(struct ath10k *ar)
2767 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2769 spin_lock_bh(&ar->data_lock);
2771 ar->stats.fw_cold_reset_counter++;
2773 spin_unlock_bh(&ar->data_lock);
2775 /* Put Target, including PCIe, into RESET. */
2776 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2778 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2780 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2781 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2782 RTC_STATE_COLD_RESET_MASK)
2787 /* Pull Target, including PCIe, out of RESET. */
2789 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2791 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2792 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2793 RTC_STATE_COLD_RESET_MASK))
2798 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2803 static int ath10k_pci_claim(struct ath10k *ar)
2805 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2806 struct pci_dev *pdev = ar_pci->pdev;
2809 pci_set_drvdata(pdev, ar);
2811 ret = pci_enable_device(pdev);
2813 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2817 ret = pci_request_region(pdev, BAR_NUM, "ath");
2819 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2824 /* Target expects 32 bit DMA. Enforce it. */
2825 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2827 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2831 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2833 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2838 pci_set_master(pdev);
2840 /* Arrange for access to Target SoC registers. */
2841 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
2842 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2844 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2849 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2853 pci_clear_master(pdev);
2856 pci_release_region(pdev, BAR_NUM);
2859 pci_disable_device(pdev);
2864 static void ath10k_pci_release(struct ath10k *ar)
2866 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2867 struct pci_dev *pdev = ar_pci->pdev;
2869 pci_iounmap(pdev, ar_pci->mem);
2870 pci_release_region(pdev, BAR_NUM);
2871 pci_clear_master(pdev);
2872 pci_disable_device(pdev);
2875 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2877 const struct ath10k_pci_supp_chip *supp_chip;
2879 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
2881 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
2882 supp_chip = &ath10k_pci_supp_chips[i];
2884 if (supp_chip->dev_id == dev_id &&
2885 supp_chip->rev_id == rev_id)
2892 static int ath10k_pci_probe(struct pci_dev *pdev,
2893 const struct pci_device_id *pci_dev)
2897 struct ath10k_pci *ar_pci;
2898 enum ath10k_hw_rev hw_rev;
2901 switch (pci_dev->device) {
2902 case QCA988X_2_0_DEVICE_ID:
2903 hw_rev = ATH10K_HW_QCA988X;
2905 case QCA6174_2_1_DEVICE_ID:
2906 hw_rev = ATH10K_HW_QCA6174;
2908 case QCA99X0_2_0_DEVICE_ID:
2909 hw_rev = ATH10K_HW_QCA99X0;
2916 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
2917 hw_rev, &ath10k_pci_hif_ops);
2919 dev_err(&pdev->dev, "failed to allocate core\n");
2923 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2925 ar_pci = ath10k_pci_priv(ar);
2926 ar_pci->pdev = pdev;
2927 ar_pci->dev = &pdev->dev;
2930 if (pdev->subsystem_vendor || pdev->subsystem_device)
2931 scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
2932 "%04x:%04x:%04x:%04x",
2933 pdev->vendor, pdev->device,
2934 pdev->subsystem_vendor, pdev->subsystem_device);
2936 spin_lock_init(&ar_pci->ce_lock);
2937 spin_lock_init(&ar_pci->ps_lock);
2939 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2941 setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
2944 ret = ath10k_pci_claim(ar);
2946 ath10k_err(ar, "failed to claim device: %d\n", ret);
2947 goto err_core_destroy;
2950 ret = ath10k_pci_alloc_pipes(ar);
2952 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2957 ath10k_pci_ce_deinit(ar);
2958 ath10k_pci_irq_disable(ar);
2960 ret = ath10k_pci_init_irq(ar);
2962 ath10k_err(ar, "failed to init irqs: %d\n", ret);
2963 goto err_free_pipes;
2966 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
2967 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2968 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2970 ret = ath10k_pci_request_irq(ar);
2972 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
2973 goto err_deinit_irq;
2976 ret = ath10k_pci_chip_reset(ar);
2978 ath10k_err(ar, "failed to reset chip: %d\n", ret);
2982 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2983 if (chip_id == 0xffffffff) {
2984 ath10k_err(ar, "failed to get chip id\n");
2988 if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
2989 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
2990 pdev->device, chip_id);
2994 ret = ath10k_core_register(ar, chip_id);
2996 ath10k_err(ar, "failed to register driver core: %d\n", ret);
3003 ath10k_pci_free_irq(ar);
3004 ath10k_pci_kill_tasklet(ar);
3007 ath10k_pci_deinit_irq(ar);
3010 ath10k_pci_free_pipes(ar);
3013 ath10k_pci_sleep_sync(ar);
3014 ath10k_pci_release(ar);
3017 ath10k_core_destroy(ar);
3022 static void ath10k_pci_remove(struct pci_dev *pdev)
3024 struct ath10k *ar = pci_get_drvdata(pdev);
3025 struct ath10k_pci *ar_pci;
3027 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3032 ar_pci = ath10k_pci_priv(ar);
3037 ath10k_core_unregister(ar);
3038 ath10k_pci_free_irq(ar);
3039 ath10k_pci_kill_tasklet(ar);
3040 ath10k_pci_deinit_irq(ar);
3041 ath10k_pci_ce_deinit(ar);
3042 ath10k_pci_free_pipes(ar);
3043 ath10k_pci_sleep_sync(ar);
3044 ath10k_pci_release(ar);
3045 ath10k_core_destroy(ar);
3048 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3050 static struct pci_driver ath10k_pci_driver = {
3051 .name = "ath10k_pci",
3052 .id_table = ath10k_pci_id_table,
3053 .probe = ath10k_pci_probe,
3054 .remove = ath10k_pci_remove,
3057 static int __init ath10k_pci_init(void)
3061 ret = pci_register_driver(&ath10k_pci_driver);
3063 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3068 module_init(ath10k_pci_init);
3070 static void __exit ath10k_pci_exit(void)
3072 pci_unregister_driver(&ath10k_pci_driver);
3075 module_exit(ath10k_pci_exit);
3077 MODULE_AUTHOR("Qualcomm Atheros");
3078 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
3079 MODULE_LICENSE("Dual BSD/GPL");
3081 /* QCA988x 2.0 firmware files */
3082 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
3083 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3084 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3085 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3086 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3087 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3089 /* QCA6174 2.1 firmware files */
3090 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3091 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3092 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3094 /* QCA6174 3.1 firmware files */
3095 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3096 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3097 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);