/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled), if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detaches iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 */
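/*
 * Illustrative sketch only, not part of the driver: the wrap-around
 * arithmetic implied by the indexes above. Because RX_QUEUE_SIZE is a
 * power of 2, masking with (RX_QUEUE_SIZE - 1) is equivalent to modulo
 * and stays well defined even when the subtraction underflows;
 * iwl_rxq_space() below uses exactly this form.
 */
static inline u32 iwl_rxq_example_free_slots(u32 read, u32 write)
{
	/* e.g. read = 3, write = 250, RX_QUEUE_SIZE = 256 -> 8 free slots */
	return (read - write - 1) & (RX_QUEUE_SIZE - 1);
}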
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
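/*
 * Illustrative sketch only: the inverse of the conversion above. The RBD
 * stores bits [35:8] of the DMA address, which is why the buffer address
 * must fit in 36 bits and be 256-byte aligned (see the BUG_ON checks in
 * iwl_pcie_rxq_alloc_rbs() below).
 */
static inline dma_addr_t iwl_pcie_example_rbd_ptr2dma_addr(__le32 rbd_ptr)
{
	return (dma_addr_t)le32_to_cpu(rbd_ptr) << 8;
}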
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	spin_lock(&rxq->lock);

	if (!rxq->need_update)
		goto exit_unlock;

	iwl_pcie_rxq_inc_wr_ptr(trans);
	rxq->need_update = false;

 exit_unlock:
	spin_unlock(&rxq->lock);
}
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans);
		spin_unlock(&rxq->lock);
	}
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		gfp_t gfp_mask = priority;

		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s. "
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization).
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
	iwl_pcie_rxq_alloc_rbs(trans, gfp);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
	 *      the credit mechanism in the 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
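/*
 * Note (illustrative, not driver code): FH_RSCSR_CHNL0_RBDCB_BASE_REG above
 * takes the RBD base address shifted right by 8 and
 * FH_RSCSR_CHNL0_STTS_WPTR_REG takes the status address shifted right by 4,
 * so the two coherent DMA blocks must be 256-byte and 16-byte aligned
 * respectively. The DMA API guarantees that coherent allocations are aligned
 * to at least the smallest page order covering the requested size, which
 * satisfies both constraints.
 */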
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock(&rxq->lock);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans, GFP_KERNEL);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		iwl_op_mode_rx(trans->op_mode, &rxcb);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
}
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate the total number of frames that need to be restocked
	 * after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
				count = 0;
				goto restart;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	if (fill_rx)
		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
	else
		iwl_pcie_rxq_restock(trans);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}
/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
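/*
 * Illustrative sketch only: with ICT_SHIFT = 12 the table is 4096 bytes,
 * i.e. ICT_COUNT = 1024 u32 entries. The wrap below mirrors the index
 * update in iwl_pcie_int_cause_ict() and relies on ICT_COUNT being a
 * power of 2.
 */
static inline u32 iwl_pcie_example_ict_next_index(u32 index)
{
	return (index + 1) & (ICT_COUNT - 1);
}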
/* Interrupt handler using the ICT table. With this interrupt, the driver
 * stops using the INTA register to read the device's interrupt cause, since
 * reading that register is expensive. Instead, the device writes the
 * interrupt causes into the ICT table in DRAM, increments its index and
 * then fires the interrupt. The driver ORs all ICT table entries from the
 * current index up to the first entry with a 0 value; the result is the
 * interrupt we need to service. The driver then sets the consumed entries
 * back to 0 and updates the index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
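/*
 * Worked example for the reassembly above (illustrative only): an ICT value
 * of 0x48001 has bits 0, 15 and 18 set. Bit 18 forces bit 15 via the w/a,
 * the low byte passes through unchanged and byte 1 is shifted up by 16 bits,
 * so bit 15 lands on bit 31 and the resulting inta is 0x80000001.
 */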
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}
	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);
	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished transmitting the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}
	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}
	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an Rx interrupt requires several steps to be done
		 * in the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt while the shared data changes do not yet
		 * reflect this; the periodic interrupt will detect any
		 * dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		iwl_pcie_rx_handle(trans);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}
	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * Allocate the DRAM shared table; it is an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				    &trans_pcie->ict_tbl_dma,
				    GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma,
		      trans_pcie->ict_tbl);

	return 0;
}
/* Device is going up; inform it that we are using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
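/*
 * Illustrative sketch only -- the actual registration lives in the PCIe
 * transport setup code, not in this file: iwl_pcie_isr() is the hard-irq
 * half of a threaded handler, and returning IRQ_WAKE_THREAD above is what
 * schedules iwl_pcie_irq_handler() as the thread function. DRV_NAME is
 * assumed to come from the driver headers.
 */
static int __maybe_unused iwl_pcie_example_request_irq(struct pci_dev *pdev,
						       struct iwl_trans *trans)
{
	return request_threaded_irq(pdev->irq, iwl_pcie_isr,
				    iwl_pcie_irq_handler, IRQF_SHARED,
				    DRV_NAME, trans);
}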